From 8841091f9c7c9e88e6a5948a656af1bc29cfcd58 Mon Sep 17 00:00:00 2001 From: Sachin Kumar <46507027+sachinmsft@users.noreply.github.com> Date: Wed, 13 Mar 2019 13:31:29 -0700 Subject: [PATCH] Container changes (#320) Added container metrics --- appveyor.yml | 1 + collector/container.go | 282 ++ docs/collector.container.md | 40 + .../github.com/Microsoft/go-winio/.gitignore | 1 + vendor/github.com/Microsoft/go-winio/LICENSE | 22 + .../github.com/Microsoft/go-winio/README.md | 22 + .../Microsoft/go-winio/archive/tar/LICENSE | 27 + .../Microsoft/go-winio/archive/tar/common.go | 344 +++ .../go-winio/archive/tar/example_test.go | 80 + .../Microsoft/go-winio/archive/tar/reader.go | 1002 ++++++++ .../go-winio/archive/tar/reader_test.go | 1125 ++++++++ .../go-winio/archive/tar/stat_atim.go | 20 + .../go-winio/archive/tar/stat_atimespec.go | 20 + .../go-winio/archive/tar/stat_unix.go | 32 + .../go-winio/archive/tar/tar_test.go | 325 +++ .../Microsoft/go-winio/archive/tar/writer.go | 444 ++++ .../go-winio/archive/tar/writer_test.go | 739 ++++++ .../github.com/Microsoft/go-winio/backup.go | 280 ++ .../Microsoft/go-winio/backup_test.go | 255 ++ .../Microsoft/go-winio/backuptar/noop.go | 4 + .../Microsoft/go-winio/backuptar/tar.go | 439 ++++ .../Microsoft/go-winio/backuptar/tar_test.go | 84 + vendor/github.com/Microsoft/go-winio/ea.go | 137 + .../github.com/Microsoft/go-winio/ea_test.go | 89 + vendor/github.com/Microsoft/go-winio/file.go | 307 +++ .../github.com/Microsoft/go-winio/fileinfo.go | 61 + .../Microsoft/go-winio/internal/etw/etw.go | 15 + .../go-winio/internal/etw/eventdata.go | 65 + .../internal/etw/eventdatadescriptor.go | 29 + .../go-winio/internal/etw/eventdescriptor.go | 67 + .../go-winio/internal/etw/eventmetadata.go | 177 ++ .../go-winio/internal/etw/eventopt.go | 63 + .../go-winio/internal/etw/fieldopt.go | 379 +++ .../go-winio/internal/etw/provider.go | 279 ++ .../go-winio/internal/etw/providerglobal.go | 52 + .../go-winio/internal/etw/ptr64_32.go | 16 + .../go-winio/internal/etw/ptr64_64.go | 15 + .../go-winio/internal/etw/sample/sample.go | 91 + .../go-winio/internal/etw/zsyscall_windows.go | 78 + vendor/github.com/Microsoft/go-winio/pipe.go | 421 +++ .../Microsoft/go-winio/pipe_test.go | 516 ++++ .../Microsoft/go-winio/pkg/etwlogrus/hook.go | 192 ++ .../go-winio/pkg/etwlogrus/hook_test.go | 126 + .../Microsoft/go-winio/privilege.go | 202 ++ .../Microsoft/go-winio/privileges_test.go | 17 + .../github.com/Microsoft/go-winio/reparse.go | 128 + vendor/github.com/Microsoft/go-winio/sd.go | 98 + .../github.com/Microsoft/go-winio/sd_test.go | 26 + .../github.com/Microsoft/go-winio/syscall.go | 3 + .../go-winio/tools/etw-provider-gen/main.go | 25 + .../go-winio/vhd/mksyscall_windows.go | 901 +++++++ .../github.com/Microsoft/go-winio/vhd/vhd.go | 108 + .../github.com/Microsoft/go-winio/vhd/zvhd.go | 99 + .../Microsoft/go-winio/wim/decompress.go | 138 + .../Microsoft/go-winio/wim/lzx/lzx.go | 606 +++++ .../go-winio/wim/validate/validate.go | 51 + .../github.com/Microsoft/go-winio/wim/wim.go | 866 +++++++ .../Microsoft/go-winio/zsyscall_windows.go | 520 ++++ .../github.com/Microsoft/hcsshim/.gitignore | 1 + .../Microsoft/hcsshim/.gometalinter.json | 17 + vendor/github.com/Microsoft/hcsshim/LICENSE | 21 + vendor/github.com/Microsoft/hcsshim/README.md | 41 + .../github.com/Microsoft/hcsshim/appveyor.yml | 29 + .../Microsoft/hcsshim/cmd/runhcs/LICENSE | 191 ++ .../Microsoft/hcsshim/cmd/runhcs/NOTICE | 22 + .../Microsoft/hcsshim/cmd/runhcs/container.go | 848 +++++++ 
.../hcsshim/cmd/runhcs/create-scratch.go | 76 + .../Microsoft/hcsshim/cmd/runhcs/create.go | 100 + .../Microsoft/hcsshim/cmd/runhcs/delete.go | 73 + .../Microsoft/hcsshim/cmd/runhcs/exec.go | 160 ++ .../Microsoft/hcsshim/cmd/runhcs/kill.go | 193 ++ .../Microsoft/hcsshim/cmd/runhcs/kill_test.go | 111 + .../Microsoft/hcsshim/cmd/runhcs/list.go | 116 + .../Microsoft/hcsshim/cmd/runhcs/main.go | 174 ++ .../Microsoft/hcsshim/cmd/runhcs/pause.go | 58 + .../Microsoft/hcsshim/cmd/runhcs/ps.go | 51 + .../cmd/runhcs/resource_windows_386.syso | Bin 0 -> 968 bytes .../cmd/runhcs/resource_windows_amd64.syso | Bin 0 -> 968 bytes .../Microsoft/hcsshim/cmd/runhcs/run.go | 64 + .../hcsshim/cmd/runhcs/runhcs.exe.manifest | 10 + .../Microsoft/hcsshim/cmd/runhcs/shim.go | 323 +++ .../Microsoft/hcsshim/cmd/runhcs/signalmap.go | 46 + .../Microsoft/hcsshim/cmd/runhcs/spec.go | 42 + .../Microsoft/hcsshim/cmd/runhcs/start.go | 43 + .../Microsoft/hcsshim/cmd/runhcs/state.go | 49 + .../Microsoft/hcsshim/cmd/runhcs/tty.go | 56 + .../Microsoft/hcsshim/cmd/runhcs/utils.go | 52 + .../hcsshim/cmd/runhcs/utils_test.go | 39 + .../hcsshim/cmd/runhcs/versioninfo.json | 43 + .../Microsoft/hcsshim/cmd/runhcs/vm.go | 209 ++ .../hcsshim/cmd/tar2ext4/tar2ext4.go | 64 + .../Microsoft/hcsshim/cmd/wclayer/create.go | 36 + .../Microsoft/hcsshim/cmd/wclayer/export.go | 66 + .../Microsoft/hcsshim/cmd/wclayer/import.go | 74 + .../Microsoft/hcsshim/cmd/wclayer/mount.go | 88 + .../Microsoft/hcsshim/cmd/wclayer/remove.go | 31 + .../cmd/wclayer/resource_windows_386.syso | Bin 0 -> 969 bytes .../cmd/wclayer/resource_windows_amd64.syso | Bin 0 -> 969 bytes .../hcsshim/cmd/wclayer/versioninfo.json | 43 + .../hcsshim/cmd/wclayer/wclayer.exe.manifest | 10 + .../Microsoft/hcsshim/cmd/wclayer/wclayer.go | 60 + .../github.com/Microsoft/hcsshim/container.go | 192 ++ vendor/github.com/Microsoft/hcsshim/errors.go | 257 ++ .../ext4/internal/compactext4/compact.go | 1263 +++++++++ .../ext4/internal/compactext4/compact_test.go | 355 +++ .../internal/compactext4/verify_linux_test.go | 248 ++ .../ext4/internal/compactext4/verify_test.go | 18 + .../hcsshim/ext4/internal/format/format.go | 411 +++ .../hcsshim/ext4/tar2ext4/tar2ext4.go | 174 ++ .../hcsshim/ext4/tar2ext4/vhdfooter.go | 76 + .../Microsoft/hcsshim/functional_tests.ps1 | 12 + .../github.com/Microsoft/hcsshim/hcn/hcn.go | 177 ++ .../Microsoft/hcsshim/hcn/hcnendpoint.go | 366 +++ .../Microsoft/hcsshim/hcn/hcnendpoint_test.go | 298 +++ .../Microsoft/hcsshim/hcn/hcnerrors.go | 95 + .../Microsoft/hcsshim/hcn/hcnerrors_test.go | 34 + .../Microsoft/hcsshim/hcn/hcnglobals.go | 87 + .../Microsoft/hcsshim/hcn/hcnloadbalancer.go | 335 +++ .../hcsshim/hcn/hcnloadbalancer_test.go | 260 ++ .../Microsoft/hcsshim/hcn/hcnnamespace.go | 424 ++++ .../hcsshim/hcn/hcnnamespace_test.go | 451 ++++ .../Microsoft/hcsshim/hcn/hcnnetwork.go | 418 +++ .../Microsoft/hcsshim/hcn/hcnnetwork_test.go | 165 ++ .../Microsoft/hcsshim/hcn/hcnpolicy.go | 217 ++ .../Microsoft/hcsshim/hcn/hcnsupport.go | 71 + .../Microsoft/hcsshim/hcn/hcnsupport_test.go | 62 + .../Microsoft/hcsshim/hcn/hcnutils_test.go | 267 ++ .../Microsoft/hcsshim/hcn/hcnv1schema_test.go | 111 + .../Microsoft/hcsshim/hcn/hnsv1_test.go | 97 + .../Microsoft/hcsshim/hcn/zsyscall_windows.go | 714 ++++++ .../github.com/Microsoft/hcsshim/hcsshim.go | 28 + .../Microsoft/hcsshim/hnsendpoint.go | 94 + .../Microsoft/hcsshim/hnsglobals.go | 16 + .../Microsoft/hcsshim/hnsnetwork.go | 36 + .../github.com/Microsoft/hcsshim/hnspolicy.go | 57 + .../Microsoft/hcsshim/hnspolicylist.go 
| 47 + .../Microsoft/hcsshim/hnssupport.go | 13 + .../github.com/Microsoft/hcsshim/interface.go | 114 + .../hcsshim/internal/appargs/appargs.go | 93 + .../hcsshim/internal/cni/registry.go | 110 + .../hcsshim/internal/cni/registry_test.go | 137 + .../hcsshim/internal/copyfile/copyfile.go | 40 + .../copywithtimeout/copywithtimeout.go | 103 + .../hcsshim/internal/guestrequest/types.go | 100 + .../Microsoft/hcsshim/internal/guid/guid.go | 69 + .../hcsshim/internal/guid/guid_test.go | 136 + .../hcsshim/internal/hcs/callback.go | 104 + .../Microsoft/hcsshim/internal/hcs/cgo.go | 7 + .../Microsoft/hcsshim/internal/hcs/errors.go | 287 +++ .../Microsoft/hcsshim/internal/hcs/hcs.go | 48 + .../Microsoft/hcsshim/internal/hcs/log.go | 20 + .../Microsoft/hcsshim/internal/hcs/process.go | 459 ++++ .../Microsoft/hcsshim/internal/hcs/system.go | 685 +++++ .../Microsoft/hcsshim/internal/hcs/utils.go | 33 + .../hcsshim/internal/hcs/waithelper.go | 63 + .../Microsoft/hcsshim/internal/hcs/watcher.go | 41 + .../hcsshim/internal/hcs/zsyscall_windows.go | 533 ++++ .../hcsshim/internal/hcserror/hcserror.go | 47 + .../hcsshim/internal/hcsoci/create.go | 173 ++ .../hcsshim/internal/hcsoci/create_test.go | 78 + .../hcsshim/internal/hcsoci/hcsdoc_lcow.go | 115 + .../hcsshim/internal/hcsoci/hcsdoc_wcow.go | 273 ++ .../hcsshim/internal/hcsoci/layers.go | 373 +++ .../hcsshim/internal/hcsoci/network.go | 41 + .../hcsshim/internal/hcsoci/resources.go | 127 + .../hcsshim/internal/hcsoci/resources_lcow.go | 104 + .../hcsshim/internal/hcsoci/resources_wcow.go | 127 + .../internal/hcsoci/wcow_argon_test.go | 260 ++ .../internal/hcsoci/wcow_xenon_test.go | 365 +++ .../Microsoft/hcsshim/internal/hns/hns.go | 23 + .../hcsshim/internal/hns/hnsendpoint.go | 262 ++ .../hcsshim/internal/hns/hnsfuncs.go | 42 + .../hcsshim/internal/hns/hnsglobals.go | 28 + .../hcsshim/internal/hns/hnsnetwork.go | 141 + .../hcsshim/internal/hns/hnspolicy.go | 98 + .../hcsshim/internal/hns/hnspolicylist.go | 201 ++ .../hcsshim/internal/hns/hnssupport.go | 49 + .../hcsshim/internal/hns/namespace.go | 110 + .../hcsshim/internal/hns/zsyscall_windows.go | 76 + .../hcsshim/internal/interop/interop.go | 27 + .../internal/interop/zsyscall_windows.go | 48 + .../hcsshim/internal/lcow/constants.go | 9 + .../Microsoft/hcsshim/internal/lcow/debug.go | 55 + .../hcsshim/internal/lcow/process.go | 161 ++ .../hcsshim/internal/lcow/scratch.go | 168 ++ .../hcsshim/internal/lcow/tar2vhd.go | 46 + .../Microsoft/hcsshim/internal/lcow/types.go | 11 + .../hcsshim/internal/lcow/vhd2tar.go | 75 + .../hcsshim/internal/logfields/fields.go | 32 + .../hcsshim/internal/longpath/longpath.go | 24 + .../hcsshim/internal/mergemaps/merge.go | 52 + .../hcsshim/internal/ociwclayer/export.go | 79 + .../hcsshim/internal/ociwclayer/import.go | 141 + .../Microsoft/hcsshim/internal/ospath/join.go | 14 + .../hcsshim/internal/regstate/regstate.go | 287 +++ .../internal/regstate/regstate_test.go | 185 ++ .../internal/regstate/zsyscall_windows.go | 51 + .../hcsshim/internal/requesttype/types.go | 10 + .../hcsshim/internal/runhcs/container.go | 71 + .../Microsoft/hcsshim/internal/runhcs/util.go | 16 + .../hcsshim/internal/runhcs/util_test.go | 17 + .../Microsoft/hcsshim/internal/runhcs/vm.go | 43 + .../hcsshim/internal/safefile/safeopen.go | 431 ++++ .../internal/safefile/safeopen_admin_test.go | 125 + .../internal/safefile/safeopen_test.go | 53 + .../internal/safefile/zsyscall_windows.go | 79 + .../hcsshim/internal/schema1/schema1.go | 245 ++ .../hcsshim/internal/schema2/attachment.go | 31 + 
.../hcsshim/internal/schema2/battery.go | 13 + .../schema2/cache_query_stats_response.go | 19 + .../hcsshim/internal/schema2/chipset.go | 27 + .../hcsshim/internal/schema2/close_handle.go | 15 + .../hcsshim/internal/schema2/com_port.go | 18 + .../internal/schema2/compute_system.go | 27 + .../hcsshim/internal/schema2/configuration.go | 72 + .../hcsshim/internal/schema2/console_size.go | 17 + .../hcsshim/internal/schema2/container.go | 35 + .../container_credential_guard_state.go | 25 + .../schema2/container_memory_information.go | 26 + .../hcsshim/internal/schema2/device.go | 16 + .../hcsshim/internal/schema2/devices.go | 43 + .../internal/schema2/enhanced_mode_video.go | 15 + .../internal/schema2/flexible_io_device.go | 19 + .../internal/schema2/guest_connection.go | 19 + .../internal/schema2/guest_connection_info.go | 21 + .../internal/schema2/guest_crash_reporting.go | 15 + .../hcsshim/internal/schema2/guest_os.go | 15 + .../hcsshim/internal/schema2/guest_state.go | 22 + .../hcsshim/internal/schema2/hosted_system.go | 17 + .../hcsshim/internal/schema2/hv_socket.go | 17 + .../hcsshim/internal/schema2/hv_socket_2.go | 16 + .../schema2/hv_socket_service_config.go | 22 + .../schema2/hv_socket_system_config.go | 22 + .../hcsshim/internal/schema2/keyboard.go | 13 + .../hcsshim/internal/schema2/layer.go | 22 + .../internal/schema2/linux_kernel_direct.go | 18 + .../internal/schema2/mapped_directory.go | 21 + .../hcsshim/internal/schema2/mapped_pipe.go | 19 + .../hcsshim/internal/schema2/memory.go | 15 + .../hcsshim/internal/schema2/memory_2.go | 25 + .../schema2/memory_information_for_vm.go | 19 + .../hcsshim/internal/schema2/memory_stats.go | 20 + .../schema2/modify_setting_request.go | 20 + .../hcsshim/internal/schema2/mouse.go | 13 + .../internal/schema2/network_adapter.go | 17 + .../hcsshim/internal/schema2/networking.go | 24 + .../internal/schema2/pause_notification.go | 16 + .../hcsshim/internal/schema2/pause_options.go | 18 + .../hcsshim/internal/schema2/plan9.go | 15 + .../hcsshim/internal/schema2/plan9_share.go | 33 + .../internal/schema2/process_details.go | 34 + .../schema2/process_modify_request.go | 20 + .../internal/schema2/process_parameters.go | 47 + .../internal/schema2/process_status.go | 22 + .../hcsshim/internal/schema2/processor.go | 19 + .../hcsshim/internal/schema2/processor_2.go | 21 + .../internal/schema2/processor_stats.go | 20 + .../hcsshim/internal/schema2/properties.go | 47 + .../internal/schema2/property_query.go | 16 + .../schema2/rdp_connection_options.go | 17 + .../internal/schema2/registry_changes.go | 17 + .../hcsshim/internal/schema2/registry_key.go | 19 + .../internal/schema2/registry_value.go | 31 + .../hcsshim/internal/schema2/restore_state.go | 19 + .../hcsshim/internal/schema2/save_options.go | 19 + .../hcsshim/internal/schema2/scsi.go | 16 + .../schema2/shared_memory_configuration.go | 15 + .../internal/schema2/shared_memory_region.go | 23 + .../schema2/shared_memory_region_info.go | 17 + .../internal/schema2/silo_properties.go | 18 + .../hcsshim/internal/schema2/statistics.go | 30 + .../hcsshim/internal/schema2/storage.go | 21 + .../hcsshim/internal/schema2/storage_qo_s.go | 17 + .../hcsshim/internal/schema2/storage_stats.go | 22 + .../hcsshim/internal/schema2/topology.go | 17 + .../hcsshim/internal/schema2/uefi.go | 21 + .../internal/schema2/uefi_boot_entry.go | 23 + .../hcsshim/internal/schema2/version.go | 17 + .../hcsshim/internal/schema2/video_monitor.go | 19 + .../internal/schema2/virtual_machine.go | 32 + .../internal/schema2/virtual_node_info.go 
| 21 + .../schema2/virtual_p_mem_controller.go | 20 + .../internal/schema2/virtual_p_mem_device.go | 19 + .../hcsshim/internal/schema2/virtual_smb.go | 17 + .../internal/schema2/virtual_smb_share.go | 21 + .../schema2/virtual_smb_share_options.go | 63 + .../hcsshim/internal/schema2/vm_memory.go | 27 + .../schema2/windows_crash_reporting.go | 17 + .../internal/schemaversion/schemaversion.go | 81 + .../schemaversion/schemaversion_test.go | 63 + .../hcsshim/internal/timeout/timeout.go | 70 + .../hcsshim/internal/uvm/constants.go | 19 + .../Microsoft/hcsshim/internal/uvm/counter.go | 11 + .../Microsoft/hcsshim/internal/uvm/create.go | 62 + .../hcsshim/internal/uvm/create_lcow.go | 378 +++ .../hcsshim/internal/uvm/create_test.go | 25 + .../hcsshim/internal/uvm/create_wcow.go | 186 ++ .../Microsoft/hcsshim/internal/uvm/modify.go | 6 + .../Microsoft/hcsshim/internal/uvm/network.go | 251 ++ .../Microsoft/hcsshim/internal/uvm/plan9.go | 136 + .../Microsoft/hcsshim/internal/uvm/scsi.go | 318 +++ .../Microsoft/hcsshim/internal/uvm/start.go | 98 + .../Microsoft/hcsshim/internal/uvm/system.go | 7 + .../hcsshim/internal/uvm/terminate.go | 7 + .../Microsoft/hcsshim/internal/uvm/types.go | 105 + .../Microsoft/hcsshim/internal/uvm/vpmem.go | 170 ++ .../Microsoft/hcsshim/internal/uvm/vsmb.go | 112 + .../Microsoft/hcsshim/internal/uvm/wait.go | 46 + .../hcsshim/internal/uvmfolder/locate.go | 35 + .../hcsshim/internal/wclayer/activatelayer.go | 32 + .../hcsshim/internal/wclayer/baselayer.go | 173 ++ .../hcsshim/internal/wclayer/createlayer.go | 31 + .../internal/wclayer/createscratchlayer.go | 38 + .../internal/wclayer/deactivatelayer.go | 29 + .../hcsshim/internal/wclayer/destroylayer.go | 30 + .../internal/wclayer/expandscratchsize.go | 30 + .../hcsshim/internal/wclayer/exportlayer.go | 76 + .../internal/wclayer/getlayermountpath.go | 56 + .../internal/wclayer/getsharedbaseimages.go | 29 + .../hcsshim/internal/wclayer/grantvmaccess.go | 30 + .../hcsshim/internal/wclayer/importlayer.go | 135 + .../hcsshim/internal/wclayer/layerexists.go | 33 + .../hcsshim/internal/wclayer/layerid.go | 13 + .../hcsshim/internal/wclayer/layerutils.go | 96 + .../hcsshim/internal/wclayer/legacy.go | 815 ++++++ .../hcsshim/internal/wclayer/nametoguid.go | 34 + .../hcsshim/internal/wclayer/preparelayer.go | 47 + .../hcsshim/internal/wclayer/processimage.go | 23 + .../internal/wclayer/unpreparelayer.go | 30 + .../hcsshim/internal/wclayer/wclayer.go | 27 + .../internal/wclayer/zsyscall_windows.go | 510 ++++ .../hcsshim/internal/wcow/scratch.go | 26 + vendor/github.com/Microsoft/hcsshim/layer.go | 106 + .../Microsoft/hcsshim/mksyscall_windows.go | 943 +++++++ .../Microsoft/hcsshim/osversion/osversion.go | 51 + .../hcsshim/osversion/windowsbuilds.go | 10 + .../Microsoft/hcsshim/pkg/go-runhcs/LICENSE | 201 ++ .../Microsoft/hcsshim/pkg/go-runhcs/NOTICE | 22 + .../Microsoft/hcsshim/pkg/go-runhcs/runhcs.go | 173 ++ .../pkg/go-runhcs/runhcs_create-scratch.go | 10 + .../hcsshim/pkg/go-runhcs/runhcs_create.go | 101 + .../hcsshim/pkg/go-runhcs/runhcs_delete.go | 33 + .../hcsshim/pkg/go-runhcs/runhcs_exec.go | 88 + .../hcsshim/pkg/go-runhcs/runhcs_kill.go | 11 + .../hcsshim/pkg/go-runhcs/runhcs_list.go | 28 + .../hcsshim/pkg/go-runhcs/runhcs_pause.go | 10 + .../hcsshim/pkg/go-runhcs/runhcs_ps.go | 20 + .../pkg/go-runhcs/runhcs_resize-tty.go | 33 + .../hcsshim/pkg/go-runhcs/runhcs_resume.go | 10 + .../hcsshim/pkg/go-runhcs/runhcs_start.go | 10 + .../hcsshim/pkg/go-runhcs/runhcs_state.go | 20 + .../hcsshim/pkg/go-runhcs/runhcs_test.go | 68 + 
.../github.com/Microsoft/hcsshim/process.go | 72 + .../functional/assets/defaultlinuxspec.json | 257 ++ .../functional/assets/defaultwindowsspec.json | 11 + .../samples/config.justin.lcow.working.json | 254 ++ .../samples/from-docker-linux/privileged.json | 2006 +++++++++++++++ .../assets/samples/from-docker-linux/sh.json | 2259 +++++++++++++++++ .../hcsshim/test/functional/lcow_test.go | 224 ++ .../test/functional/manifest/manifest.go | 4 + .../test/functional/manifest/rsrc_amd64.syso | Bin 0 -> 372470 bytes .../hcsshim/test/functional/manifest_test.go | 3 + .../Microsoft/hcsshim/test/functional/test.go | 47 + .../test/functional/utilities/createuvm.go | 76 + .../functional/utilities/defaultlinuxspec.go | 21 + .../utilities/defaultwindowsspec.go | 21 + .../test/functional/utilities/layerfolders.go | 54 + .../functional/utilities/requiresbuild.go | 19 + .../test/functional/utilities/scratch.go | 59 + .../test/functional/utilities/tempdir.go | 15 + .../functional/uvm_mem_backingtype_test.go | 107 + .../test/functional/uvm_plannine_test.go | 40 + .../test/functional/uvm_properties_test.go | 48 + .../test/functional/uvm_scratch_test.go | 114 + .../hcsshim/test/functional/uvm_scsi_test.go | 119 + .../hcsshim/test/functional/uvm_vpmem_test.go | 48 + .../hcsshim/test/functional/uvm_vsmb_test.go | 45 + .../hcsshim/test/functional/wcow_test.go | 731 ++++++ .../test/functional/wcow_xenon_v2_test.go | 51 + .../test/runhcs/create-scratch_test.go | 67 + .../hcsshim/test/runhcs/e2e_matrix_test.go | 391 +++ .../hcsshim/test/runhcs/list_test.go | 25 + .../hcsshim/test/runhcs/runhcs_test.go | 7 + .../Microsoft/hcsshim/tools/uvmboot/main.go | 262 ++ .../tools/uvmboot/resource_windows_386.syso | Bin 0 -> 968 bytes .../tools/uvmboot/resource_windows_amd64.syso | Bin 0 -> 968 bytes .../github.com/Microsoft/hcsshim/vendor.conf | 21 + .../Microsoft/hcsshim/zsyscall_windows.go | 54 + 388 files changed, 48899 insertions(+) create mode 100644 collector/container.go create mode 100644 docs/collector.container.md create mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore create mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE create mode 100644 vendor/github.com/Microsoft/go-winio/README.md create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/common.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/reader.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/writer.go create mode 100644 vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/backup.go create mode 100644 vendor/github.com/Microsoft/go-winio/backup_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/noop.go create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/tar.go create mode 100644 vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/ea.go create mode 100644 
vendor/github.com/Microsoft/go-winio/ea_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/file.go create mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/etw.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/eventdata.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/eventdatadescriptor.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/eventdescriptor.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/eventmetadata.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/eventopt.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/fieldopt.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/provider.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/providerglobal.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_32.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_64.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/sample/sample.go create mode 100644 vendor/github.com/Microsoft/go-winio/internal/etw/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go create mode 100644 vendor/github.com/Microsoft/go-winio/pipe_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go create mode 100644 vendor/github.com/Microsoft/go-winio/privileges_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go create mode 100644 vendor/github.com/Microsoft/go-winio/sd.go create mode 100644 vendor/github.com/Microsoft/go-winio/sd_test.go create mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go create mode 100644 vendor/github.com/Microsoft/go-winio/tools/etw-provider-gen/main.go create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/vhd.go create mode 100644 vendor/github.com/Microsoft/go-winio/vhd/zvhd.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/decompress.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/validate/validate.go create mode 100644 vendor/github.com/Microsoft/go-winio/wim/wim.go create mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/.gitignore create mode 100644 vendor/github.com/Microsoft/hcsshim/.gometalinter.json create mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE create mode 100644 vendor/github.com/Microsoft/hcsshim/README.md create mode 100644 vendor/github.com/Microsoft/hcsshim/appveyor.yml create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/LICENSE create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/container.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create-scratch.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/delete.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/exec.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/list.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/main.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/pause.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/ps.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/resource_windows_386.syso create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/resource_windows_amd64.syso create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/run.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/runhcs.exe.manifest create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/shim.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/signalmap.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/spec.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/start.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/tty.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/versioninfo.json create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/vm.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/tar2ext4/tar2ext4.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/create.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/export.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/import.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/mount.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/remove.go create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/resource_windows_386.syso create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/resource_windows_amd64.syso create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/versioninfo.json create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/wclayer.exe.manifest create mode 100644 vendor/github.com/Microsoft/hcsshim/cmd/wclayer/wclayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/container.go create mode 100644 vendor/github.com/Microsoft/hcsshim/errors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go create mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/verify_linux_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/verify_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/internal/format/format.go create mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/tar2ext4.go create mode 100644 vendor/github.com/Microsoft/hcsshim/ext4/tar2ext4/vhdfooter.go create mode 100644 vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcn.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint_test.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnutils_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnv1schema_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hnsv1_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcsshim.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hnsendpoint.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hnsglobals.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hnsnetwork.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicylist.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hnssupport.go create mode 100644 vendor/github.com/Microsoft/hcsshim/interface.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/appargs/appargs.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cni/registry_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/copywithtimeout/copywithtimeout.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/guid/guid_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create_test.go create 
mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/layers.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_argon_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_xenon_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/lcow/constants.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/lcow/debug.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/lcow/process.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/lcow/tar2vhd.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/lcow/types.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/lcow/vhd2tar.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/export.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/import.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/ospath/join.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/requesttype/types.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/util_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_admin_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/create_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/system.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/terminate.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/locate.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go create mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go create mode 100644 vendor/github.com/Microsoft/hcsshim/layer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion.go create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go create mode 100644 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/process.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultlinuxspec.json create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultwindowsspec.json create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/config.justin.lcow.working.json create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/privileged.json create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/sh.json create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/lcow_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/manifest/manifest.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/manifest/rsrc_amd64.syso create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/manifest_test.go 
create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/utilities/createuvm.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultlinuxspec.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultwindowsspec.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/utilities/layerfolders.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/utilities/requiresbuild.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/utilities/scratch.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/utilities/tempdir.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/uvm_mem_backingtype_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/uvm_plannine_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/uvm_properties_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scratch_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scsi_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vpmem_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vsmb_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/wcow_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/functional/wcow_xenon_v2_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/runhcs/create-scratch_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/runhcs/e2e_matrix_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/runhcs/list_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/test/runhcs/runhcs_test.go create mode 100644 vendor/github.com/Microsoft/hcsshim/tools/uvmboot/main.go create mode 100644 vendor/github.com/Microsoft/hcsshim/tools/uvmboot/resource_windows_386.syso create mode 100644 vendor/github.com/Microsoft/hcsshim/tools/uvmboot/resource_windows_amd64.syso create mode 100644 vendor/github.com/Microsoft/hcsshim/vendor.conf create mode 100644 vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go diff --git a/appveyor.yml b/appveyor.yml index af6679e8..101a4a7b 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -11,6 +11,7 @@ clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter install: - set PATH=%GOPATH%\bin;%PATH% + - set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin - go get -u github.com/prometheus/promu - go get -u github.com/alecthomas/gometalinter && gometalinter --install - choco install gitversion.portable make -y diff --git a/collector/container.go b/collector/container.go new file mode 100644 index 00000000..87e02e06 --- /dev/null +++ b/collector/container.go @@ -0,0 +1,282 @@ +// +build windows + +package collector + +import ( + "github.com/Microsoft/hcsshim" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/log" +) + +func init() { + Factories["container"] = NewContainerMetricsCollector +} + +// A ContainerMetricsCollector is a Prometheus collector for containers metrics +type ContainerMetricsCollector struct { + // Presence + ContainerAvailable *prometheus.Desc + + // Number of containers + ContainersCount *prometheus.Desc + // memory + UsageCommitBytes *prometheus.Desc + UsageCommitPeakBytes *prometheus.Desc + 
UsagePrivateWorkingSetBytes *prometheus.Desc + + // CPU + RuntimeTotal *prometheus.Desc + RuntimeUser *prometheus.Desc + RuntimeKernel *prometheus.Desc + + // Network + BytesReceived *prometheus.Desc + BytesSent *prometheus.Desc + PacketsReceived *prometheus.Desc + PacketsSent *prometheus.Desc + DroppedPacketsIncoming *prometheus.Desc + DroppedPacketsOutgoing *prometheus.Desc +} + +// NewContainerMetricsCollector constructs a new ContainerMetricsCollector +func NewContainerMetricsCollector() (Collector, error) { + const subsystem = "container" + return &ContainerMetricsCollector{ + ContainerAvailable: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "available"), + "Available", + []string{"container_id"}, + nil, + ), + ContainersCount: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "count"), + "Number of containers", + nil, + nil, + ), + UsageCommitBytes: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"), + "Memory Usage Commit Bytes", + []string{"container_id"}, + nil, + ), + UsageCommitPeakBytes: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"), + "Memory Usage Commit Peak Bytes", + []string{"container_id"}, + nil, + ), + UsagePrivateWorkingSetBytes: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"), + "Memory Usage Private Working Set Bytes", + []string{"container_id"}, + nil, + ), + RuntimeTotal: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"), + "Total Run time in Seconds", + []string{"container_id"}, + nil, + ), + RuntimeUser: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"), + "Run Time in User mode in Seconds", + []string{"container_id"}, + nil, + ), + RuntimeKernel: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"), + "Run time in Kernel mode in Seconds", + []string{"container_id"}, + nil, + ), + BytesReceived: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"), + "Bytes Received on Interface", + []string{"container_id", "interface"}, + nil, + ), + BytesSent: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"), + "Bytes Sent on Interface", + []string{"container_id", "interface"}, + nil, + ), + PacketsReceived: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"), + "Packets Received on Interface", + []string{"container_id", "interface"}, + nil, + ), + PacketsSent: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"), + "Packets Sent on Interface", + []string{"container_id", "interface"}, + nil, + ), + DroppedPacketsIncoming: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"), + "Dropped Incoming Packets on Interface", + []string{"container_id", "interface"}, + nil, + ), + DroppedPacketsOutgoing: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"), + "Dropped Outgoing Packets on Interface", + []string{"container_id", "interface"}, + nil, + ), + }, nil +} + +// Collect sends the metric values for each metric +// to the provided prometheus Metric channel. 
+func (c *ContainerMetricsCollector) Collect(ch chan<- prometheus.Metric) error { + if desc, err := c.collect(ch); err != nil { + log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err) + return err + } + return nil +} + +// containerClose closes the container resource +func containerClose(c hcsshim.Container) { + err := c.Close() + if err != nil { + log.Error(err) + } +} + +func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) { + + // Types Container is passed to get the containers compute systems only + containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}}) + if err != nil { + log.Error("Err in Getting containers:", err) + return nil, err + } + + count := len(containers) + + ch <- prometheus.MustNewConstMetric( + c.ContainersCount, + prometheus.GaugeValue, + float64(count), + ) + if count == 0 { + return nil, nil + } + + for _, containerDetails := range containers { + containerId := containerDetails.ID + + container, err := hcsshim.OpenContainer(containerId) + if container != nil { + defer containerClose(container) + } + if err != nil { + log.Error("err in opening container: ", containerId, err) + continue + } + + cstats, err := container.Statistics() + if err != nil { + log.Error("err in fetching container Statistics: ", containerId, err) + continue + } + // HCS V1 is for docker runtime. Add the docker:// prefix on container_id + containerId = "docker://" + containerId + + ch <- prometheus.MustNewConstMetric( + c.ContainerAvailable, + prometheus.CounterValue, + 1, + containerId, + ) + ch <- prometheus.MustNewConstMetric( + c.UsageCommitBytes, + prometheus.GaugeValue, + float64(cstats.Memory.UsageCommitBytes), + containerId, + ) + ch <- prometheus.MustNewConstMetric( + c.UsageCommitPeakBytes, + prometheus.GaugeValue, + float64(cstats.Memory.UsageCommitPeakBytes), + containerId, + ) + ch <- prometheus.MustNewConstMetric( + c.UsagePrivateWorkingSetBytes, + prometheus.GaugeValue, + float64(cstats.Memory.UsagePrivateWorkingSetBytes), + containerId, + ) + ch <- prometheus.MustNewConstMetric( + c.RuntimeTotal, + prometheus.CounterValue, + float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor, + containerId, + ) + ch <- prometheus.MustNewConstMetric( + c.RuntimeUser, + prometheus.CounterValue, + float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor, + containerId, + ) + ch <- prometheus.MustNewConstMetric( + c.RuntimeKernel, + prometheus.CounterValue, + float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor, + containerId, + ) + + if len(cstats.Network) == 0 { + log.Info("No Network Stats for container: ", containerId) + continue + } + + networkStats := cstats.Network + + for _, networkInterface := range networkStats { + ch <- prometheus.MustNewConstMetric( + c.BytesReceived, + prometheus.CounterValue, + float64(networkInterface.BytesReceived), + containerId, networkInterface.EndpointId, + ) + ch <- prometheus.MustNewConstMetric( + c.BytesSent, + prometheus.CounterValue, + float64(networkInterface.BytesSent), + containerId, networkInterface.EndpointId, + ) + ch <- prometheus.MustNewConstMetric( + c.PacketsReceived, + prometheus.CounterValue, + float64(networkInterface.PacketsReceived), + containerId, networkInterface.EndpointId, + ) + ch <- prometheus.MustNewConstMetric( + c.PacketsSent, + prometheus.CounterValue, + float64(networkInterface.PacketsSent), + containerId, networkInterface.EndpointId, + ) + ch <- prometheus.MustNewConstMetric( 
+				c.DroppedPacketsIncoming,
+				prometheus.CounterValue,
+				float64(networkInterface.DroppedPacketsIncoming),
+				containerId, networkInterface.EndpointId,
+			)
+			ch <- prometheus.MustNewConstMetric(
+				c.DroppedPacketsOutgoing,
+				prometheus.CounterValue,
+				float64(networkInterface.DroppedPacketsOutgoing),
+				containerId, networkInterface.EndpointId,
+			)
+			break
+		}
+	}
+
+	return nil, nil
+}
diff --git a/docs/collector.container.md b/docs/collector.container.md
new file mode 100644
index 00000000..17da0915
--- /dev/null
+++ b/docs/collector.container.md
@@ -0,0 +1,40 @@
+# container collector
+
+The container collector exposes metrics about containers running on the system
+
+Metric name prefix | `container`
+Enabled by default? | No
+
+## Flags
+
+None
+
+## Metrics
+
+Name | Description | Type | Labels
+-----|-------------|------|-------
+`wmi_container_available` | Available | counter | `container_id`
+`wmi_container_count` | Number of containers | gauge | None
+`wmi_container_cpu_usage_seconds_kernelmode` | Run time in kernel mode in seconds | counter | `container_id`
+`wmi_container_cpu_usage_seconds_usermode` | Run time in user mode in seconds | counter | `container_id`
+`wmi_container_cpu_usage_seconds_total` | Total run time in seconds | counter | `container_id`
+`wmi_container_memory_usage_commit_bytes` | Memory usage commit bytes | gauge | `container_id`
+`wmi_container_memory_usage_commit_peak_bytes` | Memory usage commit peak bytes | gauge | `container_id`
+`wmi_container_memory_usage_private_working_set_bytes` | Memory usage private working set bytes | gauge | `container_id`
+`wmi_container_network_receive_bytes_total` | Bytes received on the interface | counter | `container_id`, `interface`
+`wmi_container_network_receive_packets_total` | Packets received on the interface | counter | `container_id`, `interface`
+`wmi_container_network_receive_packets_dropped_total` | Incoming packets dropped on the interface | counter | `container_id`, `interface`
+`wmi_container_network_transmit_bytes_total` | Bytes sent on the interface | counter | `container_id`, `interface`
+`wmi_container_network_transmit_packets_total` | Packets sent on the interface | counter | `container_id`, `interface`
+`wmi_container_network_transmit_packets_dropped_total` | Outgoing packets dropped on the interface | counter | `container_id`, `interface`
+
+### Example metric
+_wmi_container_network_receive_bytes_total{container_id="docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",interface="822179E7-002C-4280-ABBA-28BCFE401826"} 9.3305343e+07_
+
+This metric means that a total of _9.3305343e+07_ bytes were received on interface _822179E7-002C-4280-ABBA-28BCFE401826_ for container _docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e_
+
+## Useful queries
+_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
+
+## Alerting examples
+_This collector does not yet have alerting examples, we would appreciate your help adding them!_
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
new file mode 100644
index 00000000..b883f1fd
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -0,0 +1 @@
+*.exe
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 00000000..b8b569d7
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 00000000..56800105 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,22 @@ +# go-winio + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. + +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go new file mode 100644 index 00000000..0378401c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. +// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "bytes" + "errors" + "fmt" + "os" + "path" + "time" +) + +const ( + blockSize = 512 + + // Types + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + CreationTime time.Time // creation time + Xattrs map[string]string + Winheaders map[string]string +} + +// File name constants from the tar spec. +const ( + fileNameSize = 100 // Maximum number of bytes in a standard tar name. + fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. +) + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. 
+type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. + m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxCreationTime = "LIBARCHIVE.creationtime" + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxWindows = "MSWINDOWS." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. 
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. + unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go new file mode 100644 index 00000000..5f0ce2f4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go @@ -0,0 +1,80 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar_test + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "log" + "os" +) + +func Example() { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new tar archive. + tw := tar.NewWriter(buf) + + // Add some files to the archive. + var files = []struct { + Name, Body string + }{ + {"readme.txt", "This archive contains some text files."}, + {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, + {"todo.txt", "Get animal handling license."}, + } + for _, file := range files { + hdr := &tar.Header{ + Name: file.Name, + Mode: 0600, + Size: int64(len(file.Body)), + } + if err := tw.WriteHeader(hdr); err != nil { + log.Fatalln(err) + } + if _, err := tw.Write([]byte(file.Body)); err != nil { + log.Fatalln(err) + } + } + // Make sure to check the error on Close. + if err := tw.Close(); err != nil { + log.Fatalln(err) + } + + // Open the tar archive for reading. + r := bytes.NewReader(buf.Bytes()) + tr := tar.NewReader(r) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + log.Fatalln(err) + } + fmt.Printf("Contents of %s:\n", hdr.Name) + if _, err := io.Copy(os.Stdout, tr); err != nil { + log.Fatalln(err) + } + fmt.Println() + } + + // Output: + // Contents of readme.txt: + // This archive contains some text files. + // Contents of gopher.txt: + // Gopher names: + // George + // Geoffrey + // Gonzo + // Contents of todo.txt: + // Get animal handling license. +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go new file mode 100644 index 00000000..e210c618 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go @@ -0,0 +1,1002 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "math" + "os" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +const maxNanoSecondIntSize = 9 + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + err error + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + hdrBuff [blockSize]byte // buffer to use in readHeader +} + +type parser struct { + err error // Last error seen +} + +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a +// tar archive. 
+type sparseFileReader struct { + rfr numBytesReader // Reads the sparse-encoded file data + sp []sparseEntry // The sparse map for the file + pos int64 // Keeps track of file position + total int64 // Total size of the file +} + +// A sparseEntry holds a single entry in a sparse file's sparse map. +// +// Sparse files are represented using a series of sparseEntrys. +// Despite the name, a sparseEntry represents an actual data fragment that +// references data found in the underlying archive stream. All regions not +// covered by a sparseEntry are logically filled with zeros. +// +// For example, if the underlying raw file contains the 10-byte data: +// var compactData = "abcdefgh" +// +// And the sparse map has the following entries: +// var sp = []sparseEntry{ +// {offset: 2, numBytes: 5} // Data fragment for [2..7] +// {offset: 18, numBytes: 3} // Data fragment for [18..21] +// } +// +// Then the content of the resulting sparse file with a "real" size of 25 is: +// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 +type sparseEntry struct { + offset int64 // Starting position of the fragment + numBytes int64 // Length of the fragment +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// Keywords for old GNU sparse headers +const ( + oldGNUSparseMainHeaderOffset = 386 + oldGNUSparseMainHeaderIsExtendedOffset = 482 + oldGNUSparseMainHeaderNumEntries = 4 + oldGNUSparseExtendedHeaderIsExtendedOffset = 504 + oldGNUSparseExtendedHeaderNumEntries = 21 + oldGNUSparseOffsetSize = 12 + oldGNUSparseNumBytesSize = 12 +) + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. +// +// io.EOF is returned at the end of the input. +func (tr *Reader) Next() (*Header, error) { + if tr.err != nil { + return nil, tr.err + } + + var hdr *Header + var extHdrs map[string]string + + // Externally, Next iterates through the tar archive as if it is a series of + // files. Internally, the tar format often uses fake "files" to add meta + // data that describes the next file. These meta data "files" should not + // normally be visible to the outside. As such, this loop iterates through + // one or more "header files" until it finds a "normal file". +loop: + for { + tr.err = tr.skipUnread() + if tr.err != nil { + return nil, tr.err + } + + hdr = tr.readHeader() + if tr.err != nil { + return nil, tr.err + } + + // Check for PAX/GNU special headers and files. + switch hdr.Typeflag { + case TypeXHeader: + extHdrs, tr.err = parsePAX(tr) + if tr.err != nil { + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + case TypeGNULongName, TypeGNULongLink: + var realname []byte + realname, tr.err = ioutil.ReadAll(tr) + if tr.err != nil { + return nil, tr.err + } + + // Convert GNU extensions to use PAX headers. 
+ if extHdrs == nil { + extHdrs = make(map[string]string) + } + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + extHdrs[paxPath] = p.parseString(realname) + case TypeGNULongLink: + extHdrs[paxLinkpath] = p.parseString(realname) + } + if p.err != nil { + tr.err = p.err + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + default: + mergePAX(hdr, extHdrs) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil, tr.err + } + } + break loop // This is a file, so stop + } + } + return hdr, nil +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. 
+func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxCreationTime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.CreationTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } else if strings.HasPrefix(k, paxWindows) { + if hdr.Winheaders == nil { + hdr.Winheaders = make(map[string]string) + } + hdr.Winheaders[k[len(paxWindows):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. +func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. + // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.WriteString(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. 
+ headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() + } + return headers, nil +} + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +// +// A PAX record is of the following form: +// "%d %s=%s\n" % (size, key, value) +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + return rec[:eq], rec[eq+1:], rem, nil +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +// parseNumeric parses the input as being encoded in either base-256 or octal. +// This function may return negative numbers. +// If parsing fails or an integer overflow occurs, err will be set. +func (p *parser) parseNumeric(b []byte) int64 { + // Check for base-256 (binary) format first. + // If the first bit is set, then all following bits constitute a two's + // complement encoded number in big-endian byte order. + if len(b) > 0 && b[0]&0x80 != 0 { + // Handling negative numbers relies on the following identity: + // -a-1 == ^a + // + // If the number is negative, we use an inversion mask to invert the + // data bytes and treat the value as an unsigned number. + var inv byte // 0x00 if positive or zero, 0xff if negative + if b[0]&0x40 != 0 { + inv = 0xff + } + + var x uint64 + for i, c := range b { + c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing + if i == 0 { + c &= 0x7f // Ignore signal bit in first byte + } + if (x >> 56) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + x = x<<8 | uint64(c) + } + if (x >> 63) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + if inv == 0xff { + return ^int64(x) + } + return int64(x) + } + + // Normal case is base-8 (octal) format. + return p.parseOctal(b) +} + +func (p *parser) parseOctal(b []byte) int64 { + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, perr := strconv.ParseUint(p.parseString(b), 8, 64) + if perr != nil { + p.err = ErrHeader + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any +// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is +// encountered in the data portion; it is okay to hit io.EOF in the padding. 
+// +// Note that this function still works properly even when sparse files are being +// used since numBytes returns the bytes remaining in the underlying io.Reader. +func (tr *Reader) skipUnread() error { + dataSkip := tr.numBytes() // Number of data bytes to skip + totalSkip := dataSkip + tr.pad // Total number of bytes to skip + tr.curr, tr.pad = nil, 0 + + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the tar stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, os.SEEK_CUR) + if err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR) + if err != nil { + tr.err = err + return tr.err + } + seekSkipped = pos2 - pos1 + } + } + + var copySkipped int64 // Number of bytes skipped via CopyN + copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) + if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip { + tr.err = io.ErrUnexpectedEOF + } + return tr.err +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + var p parser + given := p.parseOctal(header[148:156]) + unsigned, signed := checksum(header) + return p.err == nil && (given == unsigned || given == signed) +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() *Header { + header := tr.hdrBuff[:] + copy(header, zeroBlock) + + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil // io.EOF is okay here + } + + // Two blocks of zero bytes marks the end of the archive. + if bytes.Equal(header, zeroBlock[0:blockSize]) { + if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { + return nil // io.EOF is okay here + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + var p parser + hdr := new(Header) + s := slicer(header) + + hdr.Name = p.parseString(s.next(100)) + hdr.Mode = p.parseNumeric(s.next(8)) + hdr.Uid = int(p.parseNumeric(s.next(8))) + hdr.Gid = int(p.parseNumeric(s.next(8))) + hdr.Size = p.parseNumeric(s.next(12)) + hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = p.parseString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. 
+ var format string + switch { + case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) + if string(header[508:512]) == "tar\x00" { + format = "star" + } else { + format = "posix" + } + case magic == "ustar \x00": // old GNU tar + format = "gnu" + } + + switch format { + case "posix", "gnu", "star": + hdr.Uname = p.parseString(s.next(32)) + hdr.Gname = p.parseString(s.next(32)) + devmajor := s.next(8) + devminor := s.next(8) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = p.parseNumeric(devmajor) + hdr.Devminor = p.parseNumeric(devminor) + } + var prefix string + switch format { + case "posix", "gnu": + prefix = p.parseString(s.next(155)) + case "star": + prefix = p.parseString(s.next(131)) + hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + + if p.err != nil { + tr.err = p.err + return nil + } + + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { + tr.err = ErrHeader + return nil + } + + // Set the current file reader. + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + tr.curr = ®FileReader{r: tr.r, nb: nb} + + // Check for old GNU sparse format entry. + if hdr.Typeflag == TypeGNUSparse { + // Get the real size of the file. + hdr.Size = p.parseNumeric(header[483:495]) + if p.err != nil { + tr.err = p.err + return nil + } + + // Read the sparse map. + sp := tr.readOldGNUSparseMap(header) + if tr.err != nil { + return nil + } + + // Current file is a GNU sparse file. Update the current file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil + } + } + + return hdr +} + +// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, +// then one or more extension headers are used to store the rest of the sparse map. +func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { + var p parser + isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 + spCap := oldGNUSparseMainHeaderNumEntries + if isExtended { + spCap += oldGNUSparseExtendedHeaderNumEntries + } + sp := make([]sparseEntry, 0, spCap) + s := slicer(header[oldGNUSparseMainHeaderOffset:]) + + // Read the four entries from the main tar header + for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { + offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) + numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) + if p.err != nil { + tr.err = p.err + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + for isExtended { + // There are more entries. Read an extension header and parse its entries. 
+ sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) + numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) + if p.err != nil { + tr.err = p.err + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, numBytes). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + var cntNewline int64 + var buf bytes.Buffer + var blk = make([]byte, blockSize) + + // feedTokens copies data in numBlock chunks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + var feedTokens = func(cnt int64) error { + for cntNewline < cnt { + if _, err := io.ReadFull(r, blk); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + buf.Write(blk) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + var nextToken = func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return tok[:len(tok)-1] // Cut off newline + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. 
+ numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +// +// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.err != nil { + return 0, tr.err + } + if tr.curr == nil { + return 0, io.EOF + } + + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// newSparseFileReader creates a new sparseFileReader, but validates all of the +// sparse entries before doing so. +func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { + if total < 0 { + return nil, ErrHeader // Total size cannot be negative + } + + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. + for i, s := range sp { + switch { + case s.offset < 0 || s.numBytes < 0: + return nil, ErrHeader // Negative values are never okay + case s.offset > math.MaxInt64-s.numBytes: + return nil, ErrHeader // Integer overflow with large length + case s.offset+s.numBytes > total: + return nil, ErrHeader // Region extends beyond the "real" size + case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: + return nil, ErrHeader // Regions can't overlap and must be in order + } + } + return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil +} + +// readHole reads a sparse hole ending at endOffset. 
+func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { + n64 := endOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + // Skip past all empty fragments. + for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { + sfr.sp = sfr.sp[1:] + } + + // If there are no more fragments, then it is possible that there + // is one last sparse hole. + if len(sfr.sp) == 0 { + // This behavior matches the BSD tar utility. + // However, GNU tar stops returning data even if sfr.total is unmet. + if sfr.pos < sfr.total { + return sfr.readHole(b, sfr.total), nil + } + return 0, io.EOF + } + + // In front of a data fragment, so read a hole. + if sfr.pos < sfr.sp[0].offset { + return sfr.readHole(b, sfr.sp[0].offset), nil + } + + // In a data fragment, so read from it. + // This math is overflow free since we verify that offset and numBytes can + // be safely added when creating the sparseFileReader. + endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment + bytesLeft := endPos - sfr.pos // Bytes left in fragment + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + if err == io.EOF { + if sfr.pos < endPos { + err = io.ErrUnexpectedEOF // There was supposed to be more data + } else if sfr.pos < sfr.total { + err = nil // There is still an implicit sparse hole at the end + } + } + + if sfr.pos == endPos { + sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + } + return n, err +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.numBytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go new file mode 100644 index 00000000..7b148b51 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go @@ -0,0 +1,1125 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strings" + "testing" + "time" +) + +type untarTest struct { + file string // Test input file + headers []*Header // Expected output headers + chksums []string // MD5 checksum of files, leave as nil if not checked + err error // Expected error to occur +} + +var gnuTarTest = &untarTest{ + file: "testdata/gnu.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244428340, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244436044, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + }, + chksums: []string{ + "e38b27eaccb4391bdec553a7f3ae6b2f", + "c65bd2e50a56a2138bf1716f2fd56fe9", + }, +} + +var sparseTarTest = &untarTest{ + file: "testdata/sparse-formats.tar", + headers: []*Header{ + { + Name: "sparse-gnu", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392395740, 0), + Typeflag: 0x53, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392342187, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-0.1", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392340456, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "sparse-posix-1.0", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 200, + ModTime: time.Unix(1392337404, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + { + Name: "end", + Mode: 420, + Uid: 1000, + Gid: 1000, + Size: 4, + ModTime: time.Unix(1392398319, 0), + Typeflag: 0x30, + Linkname: "", + Uname: "david", + Gname: "david", + Devmajor: 0, + Devminor: 0, + }, + }, + chksums: []string{ + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "6f53234398c2449fe67c1812d993012f", + "b0061974914468de549a2af8ced10316", + }, +} + +var untarTests = []*untarTest{ + gnuTarTest, + sparseTarTest, + { + file: "testdata/star.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + { + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244592783, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + AccessTime: time.Unix(1244592783, 0), + ChangeTime: time.Unix(1244592783, 0), + }, + }, + }, + { + file: "testdata/v7.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + { + Name: "small2.txt", + Mode: 0444, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1244593104, 0), + Typeflag: '\x00', + }, + }, + }, + { + file: "testdata/pax.tar", + headers: []*Header{ + { + Name: 
"a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + Mode: 0664, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 7, + ModTime: time.Unix(1350244992, 23960108), + ChangeTime: time.Unix(1350244992, 23960108), + AccessTime: time.Unix(1350244992, 23960108), + Typeflag: TypeReg, + }, + { + Name: "a/b", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Uname: "shane", + Gname: "shane", + Size: 0, + ModTime: time.Unix(1350266320, 910238425), + ChangeTime: time.Unix(1350266320, 910238425), + AccessTime: time.Unix(1350266320, 910238425), + Typeflag: TypeSymlink, + Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + }, + }, + }, + { + file: "testdata/nil-uid.tar", // golang.org/issue/5290 + headers: []*Header{ + { + Name: "P1050238.JPG.log", + Mode: 0664, + Uid: 0, + Gid: 0, + Size: 14, + ModTime: time.Unix(1365454838, 0), + Typeflag: TypeReg, + Linkname: "", + Uname: "eyefi", + Gname: "eyefi", + Devmajor: 0, + Devminor: 0, + }, + }, + }, + { + file: "testdata/xattrs.tar", + headers: []*Header{ + { + Name: "small.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 5, + ModTime: time.Unix(1386065770, 448252320), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1389782956, 794414986), + Xattrs: map[string]string{ + "user.key": "value", + "user.key2": "value2", + // Interestingly, selinux encodes the terminating null inside the xattr + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + { + Name: "small2.txt", + Mode: 0644, + Uid: 1000, + Gid: 10, + Size: 11, + ModTime: time.Unix(1386065770, 449252304), + Typeflag: '0', + Uname: "alex", + Gname: "wheel", + AccessTime: time.Unix(1389782991, 419875220), + ChangeTime: time.Unix(1386065770, 449252304), + Xattrs: map[string]string{ + "security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + }, + }, + }, + { + // Matches the behavior of GNU, BSD, and STAR tar utilities. + file: "testdata/gnu-multi-hdrs.tar", + headers: []*Header{ + { + Name: "GNU2/GNU2/long-path-name", + Linkname: "GNU4/GNU4/long-linkpath-name", + ModTime: time.Unix(0, 0), + Typeflag: '2', + }, + }, + }, + { + // Matches the behavior of GNU and BSD tar utilities. + file: "testdata/pax-multi-hdrs.tar", + headers: []*Header{ + { + Name: "bar", + Linkname: "PAX4/PAX4/long-linkpath-name", + ModTime: time.Unix(0, 0), + Typeflag: '2', + }, + }, + }, + { + file: "testdata/neg-size.tar", + err: ErrHeader, + }, + { + file: "testdata/issue10968.tar", + err: ErrHeader, + }, + { + file: "testdata/issue11169.tar", + err: ErrHeader, + }, + { + file: "testdata/issue12435.tar", + err: ErrHeader, + }, +} + +func TestReader(t *testing.T) { + for i, v := range untarTests { + f, err := os.Open(v.file) + if err != nil { + t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err) + continue + } + defer f.Close() + + // Capture all headers and checksums. 
+ var ( + tr = NewReader(f) + hdrs []*Header + chksums []string + rdbuf = make([]byte, 8) + ) + for { + var hdr *Header + hdr, err = tr.Next() + if err != nil { + if err == io.EOF { + err = nil // Expected error + } + break + } + hdrs = append(hdrs, hdr) + + if v.chksums == nil { + continue + } + h := md5.New() + _, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read + if err != nil { + break + } + chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil))) + } + + for j, hdr := range hdrs { + if j >= len(v.headers) { + t.Errorf("file %s, test %d, entry %d: unexpected header:\ngot %+v", + v.file, i, j, *hdr) + continue + } + if !reflect.DeepEqual(*hdr, *v.headers[j]) { + t.Errorf("file %s, test %d, entry %d: incorrect header:\ngot %+v\nwant %+v", + v.file, i, j, *hdr, *v.headers[j]) + } + } + if len(hdrs) != len(v.headers) { + t.Errorf("file %s, test %d: got %d headers, want %d headers", + v.file, i, len(hdrs), len(v.headers)) + } + + for j, sum := range chksums { + if j >= len(v.chksums) { + t.Errorf("file %s, test %d, entry %d: unexpected sum: got %s", + v.file, i, j, sum) + continue + } + if sum != v.chksums[j] { + t.Errorf("file %s, test %d, entry %d: incorrect checksum: got %s, want %s", + v.file, i, j, sum, v.chksums[j]) + } + } + + if err != v.err { + t.Errorf("file %s, test %d: unexpected error: got %v, want %v", + v.file, i, err, v.err) + } + f.Close() + } +} + +func TestPartialRead(t *testing.T) { + f, err := os.Open("testdata/gnu.tar") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + + // Read the first four bytes; Next() should skip the last byte. + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get first file: %v", err) + } + buf := make([]byte, 4) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Kilt"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } + + // Second file + hdr, err = tr.Next() + if err != nil || hdr == nil { + t.Fatalf("Didn't get second file: %v", err) + } + buf = make([]byte, 6) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if expected := []byte("Google"); !bytes.Equal(buf, expected) { + t.Errorf("Contents = %v, want %v", buf, expected) + } +} + +func TestParsePAXHeader(t *testing.T) { + paxTests := [][3]string{ + {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths + {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length + {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}} + for _, test := range paxTests { + key, expected, raw := test[0], test[1], test[2] + reader := bytes.NewReader([]byte(raw)) + headers, err := parsePAX(reader) + if err != nil { + t.Errorf("Couldn't parse correctly formatted headers: %v", err) + continue + } + if strings.EqualFold(headers[key], expected) { + t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected) + continue + } + trailer := make([]byte, 100) + n, err := reader.Read(trailer) + if err != io.EOF || n != 0 { + t.Error("Buffer wasn't consumed") + } + } + badHeaderTests := [][]byte{ + []byte("3 somelongkey=\n"), + []byte("50 tooshort=\n"), + } + for _, test := range badHeaderTests { + if _, err := parsePAX(bytes.NewReader(test)); err != ErrHeader { + t.Fatal("Unexpected success when parsing bad header") + } + } +} + +func TestParsePAXTime(t *testing.T) { + // Some valid 
PAX time values + timestamps := map[string]time.Time{ + "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case + "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value + "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value + "1350244992": time.Unix(1350244992, 0), // Low precision value + } + for input, expected := range timestamps { + ts, err := parsePAXTime(input) + if err != nil { + t.Fatal(err) + } + if !ts.Equal(expected) { + t.Fatalf("Time parsing failure %s %s", ts, expected) + } + } +} + +func TestMergePAX(t *testing.T) { + hdr := new(Header) + // Test a string, integer, and time based value. + headers := map[string]string{ + "path": "a/b/c", + "uid": "1000", + "mtime": "1350244992.023960108", + } + err := mergePAX(hdr, headers) + if err != nil { + t.Fatal(err) + } + want := &Header{ + Name: "a/b/c", + Uid: 1000, + ModTime: time.Unix(1350244992, 23960108), + } + if !reflect.DeepEqual(hdr, want) { + t.Errorf("incorrect merge: got %+v, want %+v", hdr, want) + } +} + +func TestSparseFileReader(t *testing.T) { + var vectors = []struct { + realSize int64 // Real size of the output file + sparseMap []sparseEntry // Input sparse map + sparseData string // Input compact data + expected string // Expected output data + err error // Expected error outcome + }{{ + realSize: 8, + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + sparseData: "abcde", + expected: "ab\x00\x00\x00cde", + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 0, numBytes: 2}, + {offset: 5, numBytes: 3}, + }, + sparseData: "abcde", + expected: "ab\x00\x00\x00cde\x00\x00", + }, { + realSize: 8, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de", + }, { + realSize: 8, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 0}, + {offset: 6, numBytes: 0}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de", + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de\x00\x00", + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + {offset: 8, numBytes: 0}, + {offset: 8, numBytes: 0}, + {offset: 8, numBytes: 0}, + {offset: 8, numBytes: 0}, + }, + sparseData: "abcde", + expected: "\x00abc\x00\x00de\x00\x00", + }, { + realSize: 2, + sparseMap: []sparseEntry{}, + sparseData: "", + expected: "\x00\x00", + }, { + realSize: -2, + sparseMap: []sparseEntry{}, + err: ErrHeader, + }, { + realSize: -10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 2}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 10, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 5}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 35, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: 5}, + }, + sparseData: "abcde", + err: io.ErrUnexpectedEOF, + }, { + realSize: 35, + sparseMap: []sparseEntry{ + {offset: 1, numBytes: 3}, + {offset: 6, numBytes: -5}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 35, + sparseMap: []sparseEntry{ + {offset: math.MaxInt64, numBytes: 3}, + {offset: 6, numBytes: -5}, + }, + sparseData: "abcde", + err: ErrHeader, + }, { + realSize: 10, + sparseMap: []sparseEntry{ 
+ {offset: 1, numBytes: 3}, + {offset: 2, numBytes: 2}, + }, + sparseData: "abcde", + err: ErrHeader, + }} + + for i, v := range vectors { + r := bytes.NewReader([]byte(v.sparseData)) + rfr := &regFileReader{r: r, nb: int64(len(v.sparseData))} + + var sfr *sparseFileReader + var err error + var buf []byte + + sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize) + if err != nil { + goto fail + } + if sfr.numBytes() != int64(len(v.sparseData)) { + t.Errorf("test %d, numBytes() before reading: got %d, want %d", i, sfr.numBytes(), len(v.sparseData)) + } + buf, err = ioutil.ReadAll(sfr) + if err != nil { + goto fail + } + if string(buf) != v.expected { + t.Errorf("test %d, ReadAll(): got %q, want %q", i, string(buf), v.expected) + } + if sfr.numBytes() != 0 { + t.Errorf("test %d, numBytes() after reading: got %d, want %d", i, sfr.numBytes(), 0) + } + + fail: + if err != v.err { + t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) + } + } +} + +func TestReadGNUSparseMap0x1(t *testing.T) { + const ( + maxUint = ^uint(0) + maxInt = int(maxUint >> 1) + ) + var ( + big1 = fmt.Sprintf("%d", int64(maxInt)) + big2 = fmt.Sprintf("%d", (int64(maxInt)/2)+1) + big3 = fmt.Sprintf("%d", (int64(maxInt) / 3)) + ) + + var vectors = []struct { + extHdrs map[string]string // Input data + sparseMap []sparseEntry // Expected sparse entries to be outputted + err error // Expected errors that may be raised + }{{ + extHdrs: map[string]string{paxGNUSparseNumBlocks: "-4"}, + err: ErrHeader, + }, { + extHdrs: map[string]string{paxGNUSparseNumBlocks: "fee "}, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: big1, + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: big2, + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: big3, + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0.5,5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,5.5,10,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,fewafewa.5,fewafw,5,20,5,30,5", + }, + err: ErrHeader, + }, { + extHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + sparseMap: []sparseEntry{{0, 5}, {10, 5}, {20, 5}, {30, 5}}, + }} + + for i, v := range vectors { + sp, err := readGNUSparseMap0x1(v.extHdrs) + if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) { + t.Errorf("test %d, readGNUSparseMap0x1(...): got %v, want %v", i, sp, v.sparseMap) + } + if err != v.err { + t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) + } + } +} + +func TestReadGNUSparseMap1x0(t *testing.T) { + var sp = []sparseEntry{{1, 2}, {3, 4}} + for i := 0; i < 98; i++ { + sp = append(sp, sparseEntry{54321, 12345}) + } + + var vectors = []struct { + input string // Input data + sparseMap []sparseEntry // Expected sparse entries to be outputted + cnt int // Expected number of bytes read + err error // Expected errors that may be raised + }{{ + input: "", + cnt: 0, + err: io.ErrUnexpectedEOF, + }, { + input: "ab", + cnt: 2, + err: io.ErrUnexpectedEOF, + }, { + input: strings.Repeat("\x00", 512), + cnt: 512, + err: io.ErrUnexpectedEOF, + }, { + input: 
strings.Repeat("\x00", 511) + "\n", + cnt: 512, + err: ErrHeader, + }, { + input: strings.Repeat("\n", 512), + cnt: 512, + err: ErrHeader, + }, { + input: "0\n" + strings.Repeat("\x00", 510) + strings.Repeat("a", 512), + sparseMap: []sparseEntry{}, + cnt: 512, + }, { + input: strings.Repeat("0", 512) + "0\n" + strings.Repeat("\x00", 510), + sparseMap: []sparseEntry{}, + cnt: 1024, + }, { + input: strings.Repeat("0", 1024) + "1\n2\n3\n" + strings.Repeat("\x00", 506), + sparseMap: []sparseEntry{{2, 3}}, + cnt: 1536, + }, { + input: strings.Repeat("0", 1024) + "1\n2\n\n" + strings.Repeat("\x00", 509), + cnt: 1536, + err: ErrHeader, + }, { + input: strings.Repeat("0", 1024) + "1\n2\n" + strings.Repeat("\x00", 508), + cnt: 1536, + err: io.ErrUnexpectedEOF, + }, { + input: "-1\n2\n\n" + strings.Repeat("\x00", 506), + cnt: 512, + err: ErrHeader, + }, { + input: "1\nk\n2\n" + strings.Repeat("\x00", 506), + cnt: 512, + err: ErrHeader, + }, { + input: "100\n1\n2\n3\n4\n" + strings.Repeat("54321\n0000000000000012345\n", 98) + strings.Repeat("\x00", 512), + cnt: 2560, + sparseMap: sp, + }} + + for i, v := range vectors { + r := strings.NewReader(v.input) + sp, err := readGNUSparseMap1x0(r) + if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) { + t.Errorf("test %d, readGNUSparseMap1x0(...): got %v, want %v", i, sp, v.sparseMap) + } + if numBytes := len(v.input) - r.Len(); numBytes != v.cnt { + t.Errorf("test %d, bytes read: got %v, want %v", i, numBytes, v.cnt) + } + if err != v.err { + t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) + } + } +} + +func TestUninitializedRead(t *testing.T) { + test := gnuTarTest + f, err := os.Open(test.file) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer f.Close() + + tr := NewReader(f) + _, err = tr.Read([]byte{}) + if err == nil || err != io.EOF { + t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF) + } + +} + +type reader struct{ io.Reader } +type readSeeker struct{ io.ReadSeeker } +type readBadSeeker struct{ io.ReadSeeker } + +func (rbs *readBadSeeker) Seek(int64, int) (int64, error) { return 0, fmt.Errorf("illegal seek") } + +// TestReadTruncation test the ending condition on various truncated files and +// that truncated files are still detected even if the underlying io.Reader +// satisfies io.Seeker. 
+func TestReadTruncation(t *testing.T) { + var ss []string + for _, p := range []string{ + "testdata/gnu.tar", + "testdata/ustar-file-reg.tar", + "testdata/pax-path-hdr.tar", + "testdata/sparse-formats.tar", + } { + buf, err := ioutil.ReadFile(p) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + ss = append(ss, string(buf)) + } + + data1, data2, pax, sparse := ss[0], ss[1], ss[2], ss[3] + data2 += strings.Repeat("\x00", 10*512) + trash := strings.Repeat("garbage ", 64) // Exactly 512 bytes + + var vectors = []struct { + input string // Input stream + cnt int // Expected number of headers read + err error // Expected error outcome + }{ + {"", 0, io.EOF}, // Empty file is a "valid" tar file + {data1[:511], 0, io.ErrUnexpectedEOF}, + {data1[:512], 1, io.ErrUnexpectedEOF}, + {data1[:1024], 1, io.EOF}, + {data1[:1536], 2, io.ErrUnexpectedEOF}, + {data1[:2048], 2, io.EOF}, + {data1, 2, io.EOF}, + {data1[:2048] + data2[:1536], 3, io.EOF}, + {data2[:511], 0, io.ErrUnexpectedEOF}, + {data2[:512], 1, io.ErrUnexpectedEOF}, + {data2[:1195], 1, io.ErrUnexpectedEOF}, + {data2[:1196], 1, io.EOF}, // Exact end of data and start of padding + {data2[:1200], 1, io.EOF}, + {data2[:1535], 1, io.EOF}, + {data2[:1536], 1, io.EOF}, // Exact end of padding + {data2[:1536] + trash[:1], 1, io.ErrUnexpectedEOF}, + {data2[:1536] + trash[:511], 1, io.ErrUnexpectedEOF}, + {data2[:1536] + trash, 1, ErrHeader}, + {data2[:2048], 1, io.EOF}, // Exactly 1 empty block + {data2[:2048] + trash[:1], 1, io.ErrUnexpectedEOF}, + {data2[:2048] + trash[:511], 1, io.ErrUnexpectedEOF}, + {data2[:2048] + trash, 1, ErrHeader}, + {data2[:2560], 1, io.EOF}, // Exactly 2 empty blocks (normal end-of-stream) + {data2[:2560] + trash[:1], 1, io.EOF}, + {data2[:2560] + trash[:511], 1, io.EOF}, + {data2[:2560] + trash, 1, io.EOF}, + {data2[:3072], 1, io.EOF}, + {pax, 0, io.EOF}, // PAX header without data is a "valid" tar file + {pax + trash[:1], 0, io.ErrUnexpectedEOF}, + {pax + trash[:511], 0, io.ErrUnexpectedEOF}, + {sparse[:511], 0, io.ErrUnexpectedEOF}, + // TODO(dsnet): This should pass, but currently fails. 
+ // {sparse[:512], 0, io.ErrUnexpectedEOF}, + {sparse[:3584], 1, io.EOF}, + {sparse[:9200], 1, io.EOF}, // Terminate in padding of sparse header + {sparse[:9216], 1, io.EOF}, + {sparse[:9728], 2, io.ErrUnexpectedEOF}, + {sparse[:10240], 2, io.EOF}, + {sparse[:11264], 2, io.ErrUnexpectedEOF}, + {sparse, 5, io.EOF}, + {sparse + trash, 5, io.EOF}, + } + + for i, v := range vectors { + for j := 0; j < 6; j++ { + var tr *Reader + var s1, s2 string + + switch j { + case 0: + tr = NewReader(&reader{strings.NewReader(v.input)}) + s1, s2 = "io.Reader", "auto" + case 1: + tr = NewReader(&reader{strings.NewReader(v.input)}) + s1, s2 = "io.Reader", "manual" + case 2: + tr = NewReader(&readSeeker{strings.NewReader(v.input)}) + s1, s2 = "io.ReadSeeker", "auto" + case 3: + tr = NewReader(&readSeeker{strings.NewReader(v.input)}) + s1, s2 = "io.ReadSeeker", "manual" + case 4: + tr = NewReader(&readBadSeeker{strings.NewReader(v.input)}) + s1, s2 = "ReadBadSeeker", "auto" + case 5: + tr = NewReader(&readBadSeeker{strings.NewReader(v.input)}) + s1, s2 = "ReadBadSeeker", "manual" + } + + var cnt int + var err error + for { + if _, err = tr.Next(); err != nil { + break + } + cnt++ + if s2 == "manual" { + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + break + } + } + } + if err != v.err { + t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %v, want %v", + i, s1, s2, err, v.err) + } + if cnt != v.cnt { + t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %d headers, want %d headers", + i, s1, s2, cnt, v.cnt) + } + } + } +} + +// TestReadHeaderOnly tests that Reader does not attempt to read special +// header-only files. +func TestReadHeaderOnly(t *testing.T) { + f, err := os.Open("testdata/hdr-only.tar") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer f.Close() + + var hdrs []*Header + tr := NewReader(f) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Errorf("Next(): got %v, want %v", err, nil) + continue + } + hdrs = append(hdrs, hdr) + + // If a special flag, we should read nothing. + cnt, _ := io.ReadFull(tr, []byte{0}) + if cnt > 0 && hdr.Typeflag != TypeReg { + t.Errorf("ReadFull(...): got %d bytes, want 0 bytes", cnt) + } + } + + // File is crafted with 16 entries. The later 8 are identical to the first + // 8 except that the size is set. 
+ if len(hdrs) != 16 { + t.Fatalf("len(hdrs): got %d, want %d", len(hdrs), 16) + } + for i := 0; i < 8; i++ { + var hdr1, hdr2 = hdrs[i+0], hdrs[i+8] + hdr1.Size, hdr2.Size = 0, 0 + if !reflect.DeepEqual(*hdr1, *hdr2) { + t.Errorf("incorrect header:\ngot %+v\nwant %+v", *hdr1, *hdr2) + } + } +} + +func TestParsePAXRecord(t *testing.T) { + var medName = strings.Repeat("CD", 50) + var longName = strings.Repeat("AB", 100) + + var vectors = []struct { + input string + residual string + outputKey string + outputVal string + ok bool + }{ + {"6 k=v\n\n", "\n", "k", "v", true}, + {"19 path=/etc/hosts\n", "", "path", "/etc/hosts", true}, + {"210 path=" + longName + "\nabc", "abc", "path", longName, true}, + {"110 path=" + medName + "\n", "", "path", medName, true}, + {"9 foo=ba\n", "", "foo", "ba", true}, + {"11 foo=bar\n\x00", "\x00", "foo", "bar", true}, + {"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true}, + {"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true}, + {"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true}, + {"17 \x00hello=\x00world\n", "", "\x00hello", "\x00world", true}, + {"1 k=1\n", "1 k=1\n", "", "", false}, + {"6 k~1\n", "6 k~1\n", "", "", false}, + {"6_k=1\n", "6_k=1\n", "", "", false}, + {"6 k=1 ", "6 k=1 ", "", "", false}, + {"632 k=1\n", "632 k=1\n", "", "", false}, + {"16 longkeyname=hahaha\n", "16 longkeyname=hahaha\n", "", "", false}, + {"3 somelongkey=\n", "3 somelongkey=\n", "", "", false}, + {"50 tooshort=\n", "50 tooshort=\n", "", "", false}, + } + + for _, v := range vectors { + key, val, res, err := parsePAXRecord(v.input) + ok := (err == nil) + if v.ok != ok { + if v.ok { + t.Errorf("parsePAXRecord(%q): got parsing failure, want success", v.input) + } else { + t.Errorf("parsePAXRecord(%q): got parsing success, want failure", v.input) + } + } + if ok && (key != v.outputKey || val != v.outputVal) { + t.Errorf("parsePAXRecord(%q): got (%q: %q), want (%q: %q)", + v.input, key, val, v.outputKey, v.outputVal) + } + if res != v.residual { + t.Errorf("parsePAXRecord(%q): got residual %q, want residual %q", + v.input, res, v.residual) + } + } +} + +func TestParseNumeric(t *testing.T) { + var vectors = []struct { + input string + output int64 + ok bool + }{ + // Test base-256 (binary) encoded values. + {"", 0, true}, + {"\x80", 0, true}, + {"\x80\x00", 0, true}, + {"\x80\x00\x00", 0, true}, + {"\xbf", (1 << 6) - 1, true}, + {"\xbf\xff", (1 << 14) - 1, true}, + {"\xbf\xff\xff", (1 << 22) - 1, true}, + {"\xff", -1, true}, + {"\xff\xff", -1, true}, + {"\xff\xff\xff", -1, true}, + {"\xc0", -1 * (1 << 6), true}, + {"\xc0\x00", -1 * (1 << 14), true}, + {"\xc0\x00\x00", -1 * (1 << 22), true}, + {"\x87\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true}, + {"\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true}, + {"\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true}, + {"\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true}, + {"\x80\x7f\xff\xff\xff\xff\xff\xff\xff", math.MaxInt64, true}, + {"\x80\x80\x00\x00\x00\x00\x00\x00\x00", 0, false}, + {"\xff\x80\x00\x00\x00\x00\x00\x00\x00", math.MinInt64, true}, + {"\xff\x7f\xff\xff\xff\xff\xff\xff\xff", 0, false}, + {"\xf5\xec\xd1\xc7\x7e\x5f\x26\x48\x81\x9f\x8f\x9b", 0, false}, + + // Test base-8 (octal) encoded values. 
+ {"0000000\x00", 0, true}, + {" \x0000000\x00", 0, true}, + {" \x0000003\x00", 3, true}, + {"00000000227\x00", 0227, true}, + {"032033\x00 ", 032033, true}, + {"320330\x00 ", 0320330, true}, + {"0000660\x00 ", 0660, true}, + {"\x00 0000660\x00 ", 0660, true}, + {"0123456789abcdef", 0, false}, + {"0123456789\x00abcdef", 0, false}, + {"01234567\x0089abcdef", 342391, true}, + {"0123\x7e\x5f\x264123", 0, false}, + } + + for _, v := range vectors { + var p parser + num := p.parseNumeric([]byte(v.input)) + ok := (p.err == nil) + if v.ok != ok { + if v.ok { + t.Errorf("parseNumeric(%q): got parsing failure, want success", v.input) + } else { + t.Errorf("parseNumeric(%q): got parsing success, want failure", v.input) + } + } + if ok && num != v.output { + t.Errorf("parseNumeric(%q): got %d, want %d", v.input, num, v.output) + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go new file mode 100644 index 00000000..cf9cc79c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go new file mode 100644 index 00000000..6f17dbe3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go new file mode 100644 index 00000000..cb843db4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? 
+ return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go new file mode 100644 index 00000000..d63c072e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go @@ -0,0 +1,325 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + "time" +) + +func TestFileInfoHeader(t *testing.T) { + fi, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "small.txt"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(5); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } + // FileInfoHeader should error when passing nil FileInfo + if _, err := FileInfoHeader(nil, ""); err == nil { + t.Fatalf("Expected error when passing nil to FileInfoHeader") + } +} + +func TestFileInfoHeaderDir(t *testing.T) { + fi, err := os.Stat("testdata") + if err != nil { + t.Fatal(err) + } + h, err := FileInfoHeader(fi, "") + if err != nil { + t.Fatalf("FileInfoHeader: %v", err) + } + if g, e := h.Name, "testdata/"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + // Ignoring c_ISGID for golang.org/issue/4867 + if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e { + t.Errorf("Mode = %#o; want %#o", g, e) + } + if g, e := h.Size, int64(0); g != e { + t.Errorf("Size = %v; want %v", g, e) + } + if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) { + t.Errorf("ModTime = %v; want %v", g, e) + } +} + +func TestFileInfoHeaderSymlink(t *testing.T) { + h, err := FileInfoHeader(symlink{}, "some-target") + if err != nil { + t.Fatal(err) + } + if g, e := h.Name, "some-symlink"; g != e { + t.Errorf("Name = %q; want %q", g, e) + } + if g, e := h.Linkname, "some-target"; g != e { + t.Errorf("Linkname = %q; want %q", g, e) + } +} + +type symlink struct{} + +func (symlink) Name() string { return "some-symlink" } +func (symlink) Size() int64 { return 0 } +func (symlink) Mode() os.FileMode { return os.ModeSymlink } +func (symlink) ModTime() time.Time { return time.Time{} } +func (symlink) IsDir() bool { return false } +func (symlink) Sys() interface{} { return nil } + +func TestRoundTrip(t *testing.T) { + data := []byte("some file contents") + + var b bytes.Buffer + tw := NewWriter(&b) + hdr := &Header{ + Name: "file.txt", + Uid: 1 << 21, // too big for 8 octal digits + Size: int64(len(data)), + ModTime: time.Now(), + } + // tar only supports second precision. + hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond) + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("tw.WriteHeader: %v", err) + } + if _, err := tw.Write(data); err != nil { + t.Fatalf("tw.Write: %v", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("tw.Close: %v", err) + } + + // Read it back. 
+ tr := NewReader(&b) + rHdr, err := tr.Next() + if err != nil { + t.Fatalf("tr.Next: %v", err) + } + if !reflect.DeepEqual(rHdr, hdr) { + t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr) + } + rData, err := ioutil.ReadAll(tr) + if err != nil { + t.Fatalf("Read: %v", err) + } + if !bytes.Equal(rData, data) { + t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data) + } +} + +type headerRoundTripTest struct { + h *Header + fm os.FileMode +} + +func TestHeaderRoundTrip(t *testing.T) { + golden := []headerRoundTripTest{ + // regular file. + { + h: &Header{ + Name: "test.txt", + Mode: 0644 | c_ISREG, + Size: 12, + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeReg, + }, + fm: 0644, + }, + // symbolic link. + { + h: &Header{ + Name: "link.txt", + Mode: 0777 | c_ISLNK, + Size: 0, + ModTime: time.Unix(1360600852, 0), + Typeflag: TypeSymlink, + }, + fm: 0777 | os.ModeSymlink, + }, + // character device node. + { + h: &Header{ + Name: "dev/null", + Mode: 0666 | c_ISCHR, + Size: 0, + ModTime: time.Unix(1360578951, 0), + Typeflag: TypeChar, + }, + fm: 0666 | os.ModeDevice | os.ModeCharDevice, + }, + // block device node. + { + h: &Header{ + Name: "dev/sda", + Mode: 0660 | c_ISBLK, + Size: 0, + ModTime: time.Unix(1360578954, 0), + Typeflag: TypeBlock, + }, + fm: 0660 | os.ModeDevice, + }, + // directory. + { + h: &Header{ + Name: "dir/", + Mode: 0755 | c_ISDIR, + Size: 0, + ModTime: time.Unix(1360601116, 0), + Typeflag: TypeDir, + }, + fm: 0755 | os.ModeDir, + }, + // fifo node. + { + h: &Header{ + Name: "dev/initctl", + Mode: 0600 | c_ISFIFO, + Size: 0, + ModTime: time.Unix(1360578949, 0), + Typeflag: TypeFifo, + }, + fm: 0600 | os.ModeNamedPipe, + }, + // setuid. + { + h: &Header{ + Name: "bin/su", + Mode: 0755 | c_ISREG | c_ISUID, + Size: 23232, + ModTime: time.Unix(1355405093, 0), + Typeflag: TypeReg, + }, + fm: 0755 | os.ModeSetuid, + }, + // setguid. + { + h: &Header{ + Name: "group.txt", + Mode: 0750 | c_ISREG | c_ISGID, + Size: 0, + ModTime: time.Unix(1360602346, 0), + Typeflag: TypeReg, + }, + fm: 0750 | os.ModeSetgid, + }, + // sticky. + { + h: &Header{ + Name: "sticky.txt", + Mode: 0600 | c_ISREG | c_ISVTX, + Size: 7, + ModTime: time.Unix(1360602540, 0), + Typeflag: TypeReg, + }, + fm: 0600 | os.ModeSticky, + }, + // hard link. + { + h: &Header{ + Name: "hard.txt", + Mode: 0644 | c_ISREG, + Size: 0, + Linkname: "file.txt", + ModTime: time.Unix(1360600916, 0), + Typeflag: TypeLink, + }, + fm: 0644, + }, + // More information. 
+ { + h: &Header{ + Name: "info.txt", + Mode: 0600 | c_ISREG, + Size: 0, + Uid: 1000, + Gid: 1000, + ModTime: time.Unix(1360602540, 0), + Uname: "slartibartfast", + Gname: "users", + Typeflag: TypeReg, + }, + fm: 0600, + }, + } + + for i, g := range golden { + fi := g.h.FileInfo() + h2, err := FileInfoHeader(fi, "") + if err != nil { + t.Error(err) + continue + } + if strings.Contains(fi.Name(), "/") { + t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name()) + } + name := path.Base(g.h.Name) + if fi.IsDir() { + name += "/" + } + if got, want := h2.Name, name; got != want { + t.Errorf("i=%d: Name: got %v, want %v", i, got, want) + } + if got, want := h2.Size, g.h.Size; got != want { + t.Errorf("i=%d: Size: got %v, want %v", i, got, want) + } + if got, want := h2.Uid, g.h.Uid; got != want { + t.Errorf("i=%d: Uid: got %d, want %d", i, got, want) + } + if got, want := h2.Gid, g.h.Gid; got != want { + t.Errorf("i=%d: Gid: got %d, want %d", i, got, want) + } + if got, want := h2.Uname, g.h.Uname; got != want { + t.Errorf("i=%d: Uname: got %q, want %q", i, got, want) + } + if got, want := h2.Gname, g.h.Gname; got != want { + t.Errorf("i=%d: Gname: got %q, want %q", i, got, want) + } + if got, want := h2.Linkname, g.h.Linkname; got != want { + t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want) + } + if got, want := h2.Typeflag, g.h.Typeflag; got != want { + t.Logf("%#v %#v", g.h, fi.Sys()) + t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want) + } + if got, want := h2.Mode, g.h.Mode; got != want { + t.Errorf("i=%d: Mode: got %o, want %o", i, got, want) + } + if got, want := fi.Mode(), g.fm; got != want { + t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want) + } + if got, want := h2.AccessTime, g.h.AccessTime; got != want { + t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want) + } + if got, want := h2.ChangeTime, g.h.ChangeTime; got != want { + t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want) + } + if got, want := h2.ModTime, g.h.ModTime; got != want { + t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want) + } + if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h { + t.Errorf("i=%d: Sys didn't return original *Header", i) + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go new file mode 100644 index 00000000..30d7e606 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go @@ -0,0 +1,444 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) + +import ( + "bytes" + "errors" + "fmt" + "io" + "path" + "sort" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. 
+type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use pax header instead of binary numeric header + hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header + paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header +} + +type formatter struct { + err error // Last error seen +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} } + +// Flush finishes writing the current file (optional). +func (tw *Writer) Flush() error { + if tw.nb > 0 { + tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) + return tw.err + } + + n := tw.nb + tw.pad + for n > 0 && tw.err == nil { + nr := n + if nr > blockSize { + nr = blockSize + } + var nw int + nw, tw.err = tw.w.Write(zeroBlock[0:nr]) + n -= int64(nw) + } + tw.nb = 0 + tw.pad = 0 + return tw.err +} + +// Write s into b, terminating it with a NUL if there is room. +func (f *formatter) formatString(b []byte, s string) { + if len(s) > len(b) { + f.err = ErrFieldTooLong + return + } + ascii := toASCII(s) + copy(b, ascii) + if len(ascii) < len(b) { + b[len(ascii)] = 0 + } +} + +// Encode x as an octal ASCII string and write it into b with leading zeros. +func (f *formatter) formatOctal(b []byte, x int64) { + s := strconv.FormatInt(x, 8) + // leading zeros, but leave room for a NUL. + for len(s)+1 < len(b) { + s = "0" + s + } + f.formatString(b, s) +} + +// fitsInBase256 reports whether x can be encoded into n bytes using base-256 +// encoding. Unlike octal encoding, base-256 encoding does not require that the +// string ends with a NUL character. Thus, all n bytes are available for output. +// +// If operating in binary mode, this assumes strict GNU binary mode; which means +// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is +// equivalent to the sign bit in two's complement form. +func fitsInBase256(n int, x int64) bool { + var binBits = uint(n-1) * 8 + return n >= 9 || (x >= -1<<binBits && x < 1<<binBits) +} + +// Write x into b, as binary (GNUtar/star extension). +func (f *formatter) formatNumeric(b []byte, x int64) { + if fitsInBase256(len(b), x) { + for i := len(b) - 1; i >= 0; i-- { + b[i] = byte(x) + x >>= 8 + } + b[0] |= 0x80 // Highest bit indicates binary format + return + } + + f.formatOctal(b, 0) // Last resort, just write zero + f.err = ErrFieldTooLong +} + +var ( + minTime = time.Unix(0, 0) + // There is room for 11 octal digits (33 bits) of mtime. + maxTime = minTime.Add((1<<33 - 1) * time.Second) +) + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +func (tw *Writer) WriteHeader(hdr *Header) error { + return tw.writeHeader(hdr, true) +} + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +// As this method is called internally by writePax header to allow it to +// suppress writing the pax header. 
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // a map to hold pax header records, if any are needed + paxHeaders := make(map[string]string) + + // TODO(shanemhansen): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // too long fields or non ascii characters + + var f formatter + var header []byte + + // We need to select which scratch buffer to use carefully, + // since this method is called recursively to write PAX headers. + // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. + // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is + // already being used by the non-recursive call, so we must use paxHdrBuff. + header = tw.hdrBuff[:] + if !allowPax { + header = tw.paxHdrBuff[:] + } + copy(header, zeroBlock) + s := slicer(header) + + // Wrappers around formatter that automatically sets paxHeaders if the + // argument extends beyond the capacity of the input byte slice. + var formatString = func(b []byte, s string, paxKeyword string) { + needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + f.formatString(b, s) + } + var formatNumeric = func(b []byte, x int64, paxKeyword string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + f.formatOctal(b, x) + return + } + + // If it is too long for octal, and PAX is preferred, use a PAX header. + if paxKeyword != paxNone && tw.preferPax { + f.formatOctal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + tw.usedBinary = true + f.formatNumeric(b, x) + } + var formatTime = func(b []byte, t time.Time, paxKeyword string) { + var unixTime int64 + if !t.Before(minTime) && !t.After(maxTime) { + unixTime = t.Unix() + } + formatNumeric(b, unixTime, paxNone) + + // Write a PAX header if the time didn't fit precisely. + if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) { + paxHeaders[paxKeyword] = formatPAXTime(t) + } + } + + // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + pathHeaderBytes := s.next(fileNameSize) + + formatString(pathHeaderBytes, hdr.Name, paxPath) + + f.formatOctal(s.next(8), hdr.Mode) // 100:108 + formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116 + formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124 + formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136 + formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148 + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + formatString(s.next(100), hdr.Linkname, paxLinkpath) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + formatString(s.next(32), hdr.Uname, paxUname) // 265:297 + formatString(s.next(32), hdr.Gname, paxGname) // 297:329 + formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337 + formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345 + + // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + prefixHeaderBytes := s.next(155) + formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions. 
+ if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // try to use a ustar header when only the name is too long + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + prefix, suffix, ok := splitUSTARPath(hdr.Name) + if ok { + // Since we can encode in USTAR format, disable PAX header. + delete(paxHeaders, paxPath) + + // Update the path fields + formatString(pathHeaderBytes, suffix, paxNone) + formatString(prefixHeaderBytes, prefix, paxNone) + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + f.formatOctal(header[148:155], chksum) // Never fails + header[155] = ' ' + + // Check if there were any formatting errors. + if f.err != nil { + tw.err = f.err + return tw.err + } + + if allowPax { + if !hdr.AccessTime.IsZero() { + paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime) + } + if !hdr.ChangeTime.IsZero() { + paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime) + } + if !hdr.CreationTime.IsZero() { + paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime) + } + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + for k, v := range hdr.Winheaders { + paxHeaders[paxWindows+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +} + +func formatPAXTime(t time.Time) string { + sec := t.Unix() + usec := t.Nanosecond() + s := strconv.FormatInt(sec, 10) + if usec != 0 { + s = fmt.Sprintf("%s.%09d", s, usec) + } + return s +} + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). +func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= fileNameSize || !isASCII(name) { + return "", "", false + } else if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} + +// writePaxHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. However, this results in differing outputs + // for identical inputs. As such, the constant 0 is now used instead. + // golang.org/issue/12358 + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, "PaxHeaders.0", file) + + ascii := toASCII(fullName) + if len(ascii) > 100 { + ascii = ascii[:100] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + // Keys are sorted before writing to body to allow deterministic output. 
+ var keys []string + for k := range paxHeaders { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) string { + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s=%s\n", size, k, v) + + // Final adjustment if adding size field increased the record size. + if len(record) != size { + size = len(record) + record = fmt.Sprintf("%d %s=%s\n", size, k, v) + } + return record +} + +// Write writes to the current entry in the tar archive. +// Write returns the error ErrWriteTooLong if more than +// hdr.Size bytes are written after WriteHeader. +func (tw *Writer) Write(b []byte) (n int, err error) { + if tw.closed { + err = ErrWriteAfterClose + return + } + overwrite := false + if int64(len(b)) > tw.nb { + b = b[0:tw.nb] + overwrite = true + } + n, err = tw.w.Write(b) + tw.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + tw.err = err + return +} + +// Close closes the tar archive, flushing any unwritten +// data to the underlying writer. +func (tw *Writer) Close() error { + if tw.err != nil || tw.closed { + return tw.err + } + tw.Flush() + tw.closed = true + if tw.err != nil { + return tw.err + } + + // trailer: two zero blocks + for i := 0; i < 2; i++ { + _, tw.err = tw.w.Write(zeroBlock) + if tw.err != nil { + break + } + } + return tw.err +} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go new file mode 100644 index 00000000..a5c93827 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go @@ -0,0 +1,739 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "sort" + "strings" + "testing" + "testing/iotest" + "time" +) + +type writerTestEntry struct { + header *Header + contents string +} + +type writerTest struct { + file string // filename of expected output + entries []*writerTestEntry +} + +var writerTests = []*writerTest{ + // The writer test file was produced with this command: + // tar (GNU tar) 1.26 + // ln -s small.txt link.txt + // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt + { + file: "testdata/writer.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "small.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 5, + ModTime: time.Unix(1246508266, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Kilts", + }, + { + header: &Header{ + Name: "small2.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 11, + ModTime: time.Unix(1245217492, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + contents: "Google.com\n", + }, + { + header: &Header{ + Name: "link.txt", + Mode: 0777, + Uid: 1000, + Gid: 1000, + Size: 0, + ModTime: time.Unix(1314603082, 0), + Typeflag: '2', + Linkname: "small.txt", + Uname: "strings", + Gname: "strings", + }, + // no contents + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt + // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar + { + file: "testdata/writer-big.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "tmp/16gig.txt", + Mode: 0640, + Uid: 73025, + Gid: 5000, + Size: 16 << 30, + ModTime: time.Unix(1254699560, 0), + Typeflag: '0', + Uname: "dsymonds", + Gname: "eng", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // The truncated test file was produced using these commands: + // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt + // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar + { + file: "testdata/writer-big-long.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "16gig.txt", + Mode: 0644, + Uid: 1000, + Gid: 1000, + Size: 16 << 30, + ModTime: time.Unix(1399583047, 0), + Typeflag: '0', + Uname: "guillaume", + Gname: "guillaume", + }, + // fake contents + contents: strings.Repeat("\x00", 4<<10), + }, + }, + }, + // This file was produced using gnu tar 1.17 + // gnutar -b 4 --format=ustar (longname/)*15 + file.txt + { + file: "testdata/ustar.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: strings.Repeat("longname/", 15) + "file.txt", + Mode: 0644, + Uid: 0765, + Gid: 024, + Size: 06, + ModTime: time.Unix(1360135598, 0), + Typeflag: '0', + Uname: "shane", + Gname: "staff", + }, + contents: "hello\n", + }, + }, + }, + // This file was produced using gnu tar 1.26 + // echo "Slartibartfast" > file.txt + // ln file.txt hard.txt + // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt + { + file: "testdata/hardlink.tar", + entries: []*writerTestEntry{ + { + header: &Header{ + Name: "file.txt", + Mode: 0644, + Uid: 1000, + Gid: 100, + Size: 15, + ModTime: time.Unix(1425484303, 0), + Typeflag: '0', + Uname: "vbatts", + Gname: "users", + }, + contents: "Slartibartfast\n", + }, + { + header: &Header{ + Name: "hard.txt", + Mode: 0644, + Uid: 1000, + Gid: 100, + Size: 0, + ModTime: time.Unix(1425484303, 0), + Typeflag: '1', + Linkname: 
"file.txt", + Uname: "vbatts", + Gname: "users", + }, + // no contents + }, + }, + }, +} + +// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection. +func bytestr(offset int, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("%04x ", offset) + for _, ch := range b { + switch { + case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z': + s += fmt.Sprintf(" %c", ch) + default: + s += fmt.Sprintf(" %02x", ch) + } + } + return s +} + +// Render a pseudo-diff between two blocks of bytes. +func bytediff(a []byte, b []byte) string { + const rowLen = 32 + s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b)) + for offset := 0; len(a)+len(b) > 0; offset += rowLen { + na, nb := rowLen, rowLen + if na > len(a) { + na = len(a) + } + if nb > len(b) { + nb = len(b) + } + sa := bytestr(offset, a[0:na]) + sb := bytestr(offset, b[0:nb]) + if sa != sb { + s += fmt.Sprintf("-%v\n+%v\n", sa, sb) + } + a = a[na:] + b = b[nb:] + } + return s +} + +func TestWriter(t *testing.T) { +testLoop: + for i, test := range writerTests { + expected, err := ioutil.ReadFile(test.file) + if err != nil { + t.Errorf("test %d: Unexpected error: %v", i, err) + continue + } + + buf := new(bytes.Buffer) + tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB + big := false + for j, entry := range test.entries { + big = big || entry.header.Size > 1<<10 + if err := tw.WriteHeader(entry.header); err != nil { + t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) + continue testLoop + } + if _, err := io.WriteString(tw, entry.contents); err != nil { + t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err) + continue testLoop + } + } + // Only interested in Close failures for the small tests. + if err := tw.Close(); err != nil && !big { + t.Errorf("test %d: Failed closing archive: %v", i, err) + continue testLoop + } + + actual := buf.Bytes() + if !bytes.Equal(expected, actual) { + t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v", + i, bytediff(expected, actual)) + } + if testing.Short() { // The second test is expensive. + break + } + } +} + +func TestPax(t *testing.T) { + // Create an archive with a large name + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + // Force a PAX long name to be written + longName := strings.Repeat("ab", 100) + contents := strings.Repeat(" ", int(hdr.Size)) + hdr.Name = longName + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long file name") + } +} + +func TestPaxSymlink(t *testing.T) { + // Create an archive with a large linkname + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeSymlink + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long linkname to be written + longLinkname := strings.Repeat("1234567890/1234567890", 10) + hdr.Linkname = longLinkname + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Linkname != longLinkname { + t.Fatal("Couldn't recover long link name") + } +} + +func TestPaxNonAscii(t *testing.T) { + // Create an archive with non ascii. These should trigger a pax header + // because pax headers have a defined utf-8 encoding. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + + // some sample data + chineseFilename := "文件名" + chineseGroupname := "組" + chineseUsername := "用戶名" + + hdr.Name = chineseFilename + hdr.Gname = chineseGroupname + hdr.Uname = chineseUsername + + contents := strings.Repeat(" ", int(hdr.Size)) + + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + // Test that we can get a long name back out of the archive. + reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != chineseFilename { + t.Fatal("Couldn't recover unicode name") + } + if hdr.Gname != chineseGroupname { + t.Fatal("Couldn't recover unicode group") + } + if hdr.Uname != chineseUsername { + t.Fatal("Couldn't recover unicode user") + } +} + +func TestPaxXattrs(t *testing.T) { + xattrs := map[string]string{ + "user.key": "value", + } + + // Create an archive with an xattr + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + contents := "Kilts" + hdr.Xattrs = xattrs + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get the xattrs back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(hdr.Xattrs, xattrs) { + t.Fatalf("xattrs did not survive round trip: got %+v, want %+v", + hdr.Xattrs, xattrs) + } +} + +func TestPaxHeadersSorted(t *testing.T) { + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + if err != nil { + t.Fatalf("os.Stat: %v", err) + } + contents := strings.Repeat(" ", int(hdr.Size)) + + hdr.Xattrs = map[string]string{ + "foo": "foo", + "bar": "bar", + "baz": "baz", + "qux": "qux", + } + + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if _, err = writer.Write([]byte(contents)); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Simple test to make sure PAX extensions are in effect + if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) { + t.Fatal("Expected at least one PAX header to be written.") + } + + // xattr bar should always appear before others + indices := []int{ + bytes.Index(buf.Bytes(), []byte("bar=bar")), + bytes.Index(buf.Bytes(), []byte("baz=baz")), + bytes.Index(buf.Bytes(), []byte("foo=foo")), + bytes.Index(buf.Bytes(), []byte("qux=qux")), + } + if !sort.IntsAreSorted(indices) { + t.Fatal("PAX headers are not sorted") + } +} + +func TestUSTARLongName(t *testing.T) { + // Create an archive with a path that failed to split with USTAR extension in previous versions. + fileinfo, err := os.Stat("testdata/small.txt") + if err != nil { + t.Fatal(err) + } + hdr, err := FileInfoHeader(fileinfo, "") + hdr.Typeflag = TypeDir + if err != nil { + t.Fatalf("os.Stat:1 %v", err) + } + // Force a PAX long name to be written. The name was taken from a practical example + // that fails and replaced ever char through numbers to anonymize the sample. + longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" + hdr.Name = longName + + hdr.Size = 0 + var buf bytes.Buffer + writer := NewWriter(&buf) + if err := writer.WriteHeader(hdr); err != nil { + t.Fatal(err) + } + if err := writer.Close(); err != nil { + t.Fatal(err) + } + // Test that we can get a long name back out of the archive. 
+ reader := NewReader(&buf) + hdr, err = reader.Next() + if err != nil { + t.Fatal(err) + } + if hdr.Name != longName { + t.Fatal("Couldn't recover long name") + } +} + +func TestValidTypeflagWithPAXHeader(t *testing.T) { + var buffer bytes.Buffer + tw := NewWriter(&buffer) + + fileName := strings.Repeat("ab", 100) + + hdr := &Header{ + Name: fileName, + Size: 4, + Typeflag: 0, + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("Failed to write header: %s", err) + } + if _, err := tw.Write([]byte("fooo")); err != nil { + t.Fatalf("Failed to write the file's data: %s", err) + } + tw.Close() + + tr := NewReader(&buffer) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("Failed to read header: %s", err) + } + if header.Typeflag != 0 { + t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag) + } + } +} + +func TestWriteAfterClose(t *testing.T) { + var buffer bytes.Buffer + tw := NewWriter(&buffer) + + hdr := &Header{ + Name: "small.txt", + Size: 5, + } + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("Failed to write header: %s", err) + } + tw.Close() + if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose { + t.Fatalf("Write: got %v; want ErrWriteAfterClose", err) + } +} + +func TestSplitUSTARPath(t *testing.T) { + var sr = strings.Repeat + + var vectors = []struct { + input string // Input path + prefix string // Expected output prefix + suffix string // Expected output suffix + ok bool // Split success? + }{ + {"", "", "", false}, + {"abc", "", "", false}, + {"用戶名", "", "", false}, + {sr("a", fileNameSize), "", "", false}, + {sr("a", fileNameSize) + "/", "", "", false}, + {sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true}, + {sr("a", fileNamePrefixSize) + "/", "", "", false}, + {sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true}, + {sr("a", fileNameSize+1), "", "", false}, + {sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true}, + {sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize), + sr("a", fileNamePrefixSize), sr("b", fileNameSize), true}, + {sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false}, + {sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true}, + } + + for _, v := range vectors { + prefix, suffix, ok := splitUSTARPath(v.input) + if prefix != v.prefix || suffix != v.suffix || ok != v.ok { + t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)", + v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok) + } + } +} + +func TestFormatPAXRecord(t *testing.T) { + var medName = strings.Repeat("CD", 50) + var longName = strings.Repeat("AB", 100) + + var vectors = []struct { + inputKey string + inputVal string + output string + }{ + {"k", "v", "6 k=v\n"}, + {"path", "/etc/hosts", "19 path=/etc/hosts\n"}, + {"path", longName, "210 path=" + longName + "\n"}, + {"path", medName, "110 path=" + medName + "\n"}, + {"foo", "ba", "9 foo=ba\n"}, + {"foo", "bar", "11 foo=bar\n"}, + {"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"}, + {"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"}, + {"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"}, + {"\x00hello", "\x00world", "17 \x00hello=\x00world\n"}, + } + + for _, v := range vectors { + output := formatPAXRecord(v.inputKey, v.inputVal) + if output != v.output { + t.Errorf("formatPAXRecord(%q, %q): got %q, want %q", + v.inputKey, v.inputVal, output, v.output) + } + } +} + +func TestFitsInBase256(t *testing.T) { + var vectors = []struct { + input int64 + width int + 
ok bool + }{ + {+1, 8, true}, + {0, 8, true}, + {-1, 8, true}, + {1 << 56, 8, false}, + {(1 << 56) - 1, 8, true}, + {-1 << 56, 8, true}, + {(-1 << 56) - 1, 8, false}, + {121654, 8, true}, + {-9849849, 8, true}, + {math.MaxInt64, 9, true}, + {0, 9, true}, + {math.MinInt64, 9, true}, + {math.MaxInt64, 12, true}, + {0, 12, true}, + {math.MinInt64, 12, true}, + } + + for _, v := range vectors { + ok := fitsInBase256(v.width, v.input) + if ok != v.ok { + t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok) + } + } +} + +func TestFormatNumeric(t *testing.T) { + var vectors = []struct { + input int64 + output string + ok bool + }{ + // Test base-256 (binary) encoded values. + {-1, "\xff", true}, + {-1, "\xff\xff", true}, + {-1, "\xff\xff\xff", true}, + {(1 << 0), "0", false}, + {(1 << 8) - 1, "\x80\xff", true}, + {(1 << 8), "0\x00", false}, + {(1 << 16) - 1, "\x80\xff\xff", true}, + {(1 << 16), "00\x00", false}, + {-1 * (1 << 0), "\xff", true}, + {-1*(1<<0) - 1, "0", false}, + {-1 * (1 << 8), "\xff\x00", true}, + {-1*(1<<8) - 1, "0\x00", false}, + {-1 * (1 << 16), "\xff\x00\x00", true}, + {-1*(1<<16) - 1, "00\x00", false}, + {537795476381659745, "0000000\x00", false}, + {537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true}, + {-615126028225187231, "0000000\x00", false}, + {-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true}, + {math.MaxInt64, "0000000\x00", false}, + {math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true}, + {math.MinInt64, "0000000\x00", false}, + {math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true}, + {math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true}, + {math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true}, + } + + for _, v := range vectors { + var f formatter + output := make([]byte, len(v.output)) + f.formatNumeric(output, v.input) + ok := (f.err == nil) + if ok != v.ok { + if v.ok { + t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input) + } else { + t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input) + } + } + if string(output) != v.output { + t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output) + } + } +} + +func TestFormatPAXTime(t *testing.T) { + t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC) + t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC) + t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC) + t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) + verify := func(time time.Time, s string) { + p := formatPAXTime(time) + if p != s { + t.Errorf("for %v, expected %s, got %s", time, s, p) + } + } + verify(t1, "946724400") + verify(t2, "946724400.000000100") + verify(t3, "-315579600") + verify(t4, "0") +} diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 00000000..2be34af4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + 
BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). 
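+// A minimal usage sketch (illustrative only; w is any io.Writer and the
+// payload is made up):
+//
+//	bw := NewBackupStreamWriter(w)
+//	data := []byte("hello")
+//	if err := bw.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))}); err != nil {
+//		// handle error
+//	}
+//	if _, err := bw.Write(data); err != nil {
+//		// handle error
+//	}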
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. 
+func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). +func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/backup_test.go b/vendor/github.com/Microsoft/go-winio/backup_test.go new file mode 100644 index 00000000..cc5a0c5f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup_test.go @@ -0,0 +1,255 @@ +package winio + +import ( + "io" + "io/ioutil" + "os" + "syscall" + "testing" +) + +var testFileName string + +func TestMain(m *testing.M) { + f, err := ioutil.TempFile("", "tmp") + if err != nil { + panic(err) + } + testFileName = f.Name() + f.Close() + defer os.Remove(testFileName) + os.Exit(m.Run()) +} + +func makeTestFile(makeADS bool) error { + os.Remove(testFileName) + f, err := os.Create(testFileName) + if err != nil { + return err + } + defer f.Close() + _, err = f.Write([]byte("testing 1 2 3\n")) + if err != nil { + return err + } + if makeADS { + a, err := os.Create(testFileName + ":ads.txt") + if err != nil { + return err + } + defer a.Close() + _, err = a.Write([]byte("alternate data stream\n")) + if err != nil { + return err + } + } + return nil +} + +func TestBackupRead(t *testing.T) { + err := makeTestFile(true) + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if len(b) == 0 { + t.Fatal("no data") + } +} + +func TestBackupStreamRead(t *testing.T) { + err := makeTestFile(true) + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + + br := NewBackupStreamReader(r) + gotData := false + gotAltData := false + for { + hdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + + switch hdr.Id { + case BackupData: + if gotData { + t.Fatal("duplicate data") + } + if hdr.Name != "" { + t.Fatalf("unexpected name %s", hdr.Name) + } + b, err := ioutil.ReadAll(br) + if err != nil { + t.Fatal(err) + } + if string(b) != "testing 1 2 3\n" { + t.Fatalf("incorrect data %v", b) + } + gotData = true + case BackupAlternateData: + if gotAltData { + t.Fatal("duplicate alt data") + } + if hdr.Name != ":ads.txt:$DATA" { + t.Fatalf("incorrect name %s", hdr.Name) + } + b, err := ioutil.ReadAll(br) + if err != nil { + t.Fatal(err) + } + if string(b) != "alternate data stream\n" { + t.Fatalf("incorrect data %v", b) + } + gotAltData = true + default: + t.Fatalf("unknown stream ID %d", hdr.Id) + } + } + if !gotData || !gotAltData { + t.Fatal("missing stream") + } +} + +func TestBackupStreamWrite(t *testing.T) { + f, 
err := os.Create(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + w := NewBackupFileWriter(f, false) + defer w.Close() + + data := "testing 1 2 3\n" + altData := "alternate stream\n" + + br := NewBackupStreamWriter(w) + err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))}) + if err != nil { + t.Fatal(err) + } + n, err := br.Write([]byte(data)) + if err != nil { + t.Fatal(err) + } + if n != len(data) { + t.Fatal("short write") + } + + err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"}) + if err != nil { + t.Fatal(err) + } + n, err = br.Write([]byte(altData)) + if err != nil { + t.Fatal(err) + } + if n != len(altData) { + t.Fatal("short write") + } + + f.Close() + + b, err := ioutil.ReadFile(testFileName) + if err != nil { + t.Fatal(err) + } + if string(b) != data { + t.Fatalf("wrong data %v", b) + } + + b, err = ioutil.ReadFile(testFileName + ":ads.txt") + if err != nil { + t.Fatal(err) + } + if string(b) != altData { + t.Fatalf("wrong data %v", b) + } +} + +func makeSparseFile() error { + os.Remove(testFileName) + f, err := os.Create(testFileName) + if err != nil { + return err + } + defer f.Close() + + const ( + FSCTL_SET_SPARSE = 0x000900c4 + FSCTL_SET_ZERO_DATA = 0x000980c8 + ) + + err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil) + if err != nil { + return err + } + + _, err = f.Write([]byte("testing 1 2 3\n")) + if err != nil { + return err + } + + _, err = f.Seek(1000000, 0) + if err != nil { + return err + } + + _, err = f.Write([]byte("more data later\n")) + if err != nil { + return err + } + + return nil +} + +func TestBackupSparseFile(t *testing.T) { + err := makeSparseFile() + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + + br := NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + + t.Log(hdr) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go new file mode 100644 index 00000000..d39eccf0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go @@ -0,0 +1,4 @@ +// +build !windows +// This file only exists to allow go get on non-Windows platforms. 
+ +package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go new file mode 100644 index 00000000..d6566dbf --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -0,0 +1,439 @@ +// +build windows + +package backuptar + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface +) + +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +const ( + hdrFileAttributes = "fileattr" + hdrSecurityDescriptor = "sd" + hdrRawSecurityDescriptor = "rawsd" + hdrMountPoint = "mountpoint" + hdrEaPrefix = "xattr." +) + +func writeZeroes(w io.Writer, count int64) error { + buf := make([]byte, 8192) + c := len(buf) + for i := int64(0); i < count; i += int64(c) { + if int64(c) > count-i { + c = int(count - i) + } + _, err := w.Write(buf[:c]) + if err != nil { + return err + } + } + return nil +} + +func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { + curOffset := int64(0) + for { + bhdr, err := br.Next() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if bhdr.Id != winio.BackupSparseBlock { + return fmt.Errorf("unexpected stream %d", bhdr.Id) + } + + // archive/tar does not support writing sparse files + // so just write zeroes to catch up to the current offset. + err = writeZeroes(t, bhdr.Offset-curOffset) + if bhdr.Size == 0 { + break + } + n, err := io.Copy(t, br) + if err != nil { + return err + } + curOffset = bhdr.Offset + n + } + return nil +} + +// BasicInfoHeader creates a tar header from basic file information. +func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { + hdr := &tar.Header{ + Name: filepath.ToSlash(name), + Size: size, + Typeflag: tar.TypeReg, + ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), + ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), + AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), + CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()), + Winheaders: make(map[string]string), + } + hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + hdr.Mode |= c_ISDIR + hdr.Size = 0 + hdr.Typeflag = tar.TypeDir + } + return hdr +} + +// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. +// +// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. 
+// +// The additional Win32 metadata is: +// +// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value +// +// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format +// +// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) +func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { + name = filepath.ToSlash(name) + hdr := BasicInfoHeader(name, size, fileInfo) + + // If r can be seeked, then this function is two-pass: pass 1 collects the + // tar header data, and pass 2 copies the data stream. If r cannot be + // seeked, then some header data (in particular EAs) will be silently lost. + var ( + restartPos int64 + err error + ) + sr, readTwice := r.(io.Seeker) + if readTwice { + if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { + readTwice = false + } + } + + br := winio.NewBackupStreamReader(r) + var dataHdr *winio.BackupHeader + for dataHdr == nil { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupData: + hdr.Mode |= c_ISREG + if !readTwice { + dataHdr = bhdr + } + case winio.BackupSecurity: + sd, err := ioutil.ReadAll(br) + if err != nil { + return err + } + hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) + + case winio.BackupReparseData: + hdr.Mode |= c_ISLNK + hdr.Typeflag = tar.TypeSymlink + reparseBuffer, err := ioutil.ReadAll(br) + rp, err := winio.DecodeReparsePoint(reparseBuffer) + if err != nil { + return err + } + if rp.IsMountPoint { + hdr.Winheaders[hdrMountPoint] = "1" + } + hdr.Linkname = rp.Target + + case winio.BackupEaData: + eab, err := ioutil.ReadAll(br) + if err != nil { + return err + } + eas, err := winio.DecodeExtendedAttributes(eab) + if err != nil { + return err + } + for _, ea := range eas { + // Use base64 encoding for the binary value. Note that there + // is no way to encode the EA's flags, since their use doesn't + // make any sense for persisted EAs. + hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) + } + + case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: + // ignore these streams + default: + return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) + } + } + + err = t.WriteHeader(hdr) + if err != nil { + return err + } + + if readTwice { + // Get back to the data stream. + if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { + return err + } + for dataHdr == nil { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if bhdr.Id == winio.BackupData { + dataHdr = bhdr + } + } + } + + if dataHdr != nil { + // A data stream was found. Copy the data. + if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 { + if size != dataHdr.Size { + return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) + } + _, err = io.Copy(t, br) + if err != nil { + return err + } + } else { + err = copySparse(t, br) + if err != nil { + return err + } + } + } + + // Look for streams after the data stream. The only ones we handle are alternate data streams. + // Other streams may have metadata that could be serialized, but the tar header has already + // been written. In practice, this means that we don't get EA or TXF metadata. 
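+	// Each alternate data stream found below becomes its own tar entry named
+	// "<file name><stream name>" (with any ":$DATA" suffix stripped from the
+	// stream name); sparse alternate streams are rejected.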
+ for { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupAlternateData: + altName := bhdr.Name + if strings.HasSuffix(altName, ":$DATA") { + altName = altName[:len(altName)-len(":$DATA")] + } + if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { + hdr = &tar.Header{ + Name: name + altName, + Mode: hdr.Mode, + Typeflag: tar.TypeReg, + Size: bhdr.Size, + ModTime: hdr.ModTime, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + err = t.WriteHeader(hdr) + if err != nil { + return err + } + _, err = io.Copy(t, br) + if err != nil { + return err + } + + } else { + // Unsupported for now, since the size of the alternate stream is not present + // in the backup stream until after the data has been read. + return errors.New("tar of sparse alternate data streams is unsupported") + } + case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: + // ignore these streams + default: + return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) + } + } + return nil +} + +// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by +// WriteTarFileFromBackupStream. +func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { + name = hdr.Name + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + size = hdr.Size + } + fileInfo = &winio.FileBasicInfo{ + LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()), + LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()), + ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()), + CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()), + } + if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok { + attr, err := strconv.ParseUint(attrStr, 10, 32) + if err != nil { + return "", 0, nil, err + } + fileInfo.FileAttributes = uint32(attr) + } else { + if hdr.Typeflag == tar.TypeDir { + fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY + } + } + return +} + +// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple +// tar file entries in order to collect all the alternate data streams for the file, it returns the next +// tar file that was not processed, or io.EOF is there are no more. +func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { + bw := winio.NewBackupStreamWriter(w) + var sd []byte + var err error + // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written + // by this library will have raw binary for the security descriptor. 
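+	// If both the legacy SDDL header and the raw binary header are present,
+	// the raw form is decoded second and takes precedence.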
+ if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + if len(sd) != 0 { + bhdr := winio.BackupHeader{ + Id: winio.BackupSecurity, + Size: int64(len(sd)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(sd) + if err != nil { + return nil, err + } + } + var eas []winio.ExtendedAttribute + for k, v := range hdr.Winheaders { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err := winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + bhdr := winio.BackupHeader{ + Id: winio.BackupEaData, + Size: int64(len(eadata)), + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(eadata) + if err != nil { + return nil, err + } + } + if hdr.Typeflag == tar.TypeSymlink { + _, isMountPoint := hdr.Winheaders[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + reparse := winio.EncodeReparsePoint(&rp) + bhdr := winio.BackupHeader{ + Id: winio.BackupReparseData, + Size: int64(len(reparse)), + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = bw.Write(reparse) + if err != nil { + return nil, err + } + } + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + bhdr := winio.BackupHeader{ + Id: winio.BackupData, + Size: hdr.Size, + } + err := bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } + // Copy all the alternate data streams and return the next non-ADS header. 
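+	// An entry counts as an alternate data stream only if it is a regular file
+	// whose name starts with "<parent name>:"; the first entry that does not
+	// match is returned to the caller unprocessed.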
+ for { + ahdr, err := t.Next() + if err != nil { + return nil, err + } + if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { + return ahdr, nil + } + bhdr := winio.BackupHeader{ + Id: winio.BackupAlternateData, + Size: ahdr.Size, + Name: ahdr.Name[len(hdr.Name):] + ":$DATA", + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go new file mode 100644 index 00000000..e04d47f2 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go @@ -0,0 +1,84 @@ +package backuptar + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" +) + +func ensurePresent(t *testing.T, m map[string]string, keys ...string) { + for _, k := range keys { + if _, ok := m[k]; !ok { + t.Error(k, "not present in tar header") + } + } +} + +func TestRoundTrip(t *testing.T) { + f, err := ioutil.TempFile("", "tst") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.Remove(f.Name()) + + if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil { + t.Fatal(err) + } + + if _, err = f.Seek(0, 0); err != nil { + t.Fatal(err) + } + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + bi, err := winio.GetFileBasicInfo(f) + if err != nil { + t.Fatal(err) + } + + br := winio.NewBackupFileReader(f, true) + defer br.Close() + + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + + err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi) + if err != nil { + t.Fatal(err) + } + + tr := tar.NewReader(&buf) + hdr, err := tr.Next() + if err != nil { + t.Fatal(err) + } + + name, size, bi2, err := FileInfoFromHeader(hdr) + if err != nil { + t.Fatal(err) + } + + if name != filepath.ToSlash(f.Name()) { + t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name())) + } + + if size != fi.Size() { + t.Errorf("got size %d, expected %d", size, fi.Size()) + } + + if !reflect.DeepEqual(*bi, *bi2) { + t.Errorf("got %#v, expected %#v", *bi, *bi2) + } + + ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd") +} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 00000000..4051c1b3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. 
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. 
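+// A minimal round-trip sketch (the EA name and value are illustrative):
+//
+//	buf, err := EncodeExtendedAttributes([]ExtendedAttribute{{Name: "user.comment", Value: []byte("hi")}})
+//	if err != nil {
+//		// handle error
+//	}
+//	eas, err := DecodeExtendedAttributes(buf)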
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/ea_test.go b/vendor/github.com/Microsoft/go-winio/ea_test.go new file mode 100644 index 00000000..27db14ff --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea_test.go @@ -0,0 +1,89 @@ +package winio + +import ( + "io/ioutil" + "os" + "reflect" + "syscall" + "testing" + "unsafe" +) + +var ( + testEas = []ExtendedAttribute{ + {Name: "foo", Value: []byte("bar")}, + {Name: "fizz", Value: []byte("buzz")}, + } + + testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0} + testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3] + testEasTruncated = testEasEncoded[0:20] +) + +func Test_RoundTripEas(t *testing.T) { + b, err := EncodeExtendedAttributes(testEas) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEasEncoded, b) { + t.Fatalf("encoded mismatch %v %v", testEasEncoded, b) + } + eas, err := DecodeExtendedAttributes(b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func Test_EasDontNeedPaddingAtEnd(t *testing.T) { + eas, err := DecodeExtendedAttributes(testEasNotPadded) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func Test_TruncatedEasFailCorrectly(t *testing.T) { + _, err := DecodeExtendedAttributes(testEasTruncated) + if err == nil { + t.Fatal("expected error") + } +} + +func Test_NilEasEncodeAndDecodeAsNil(t *testing.T) { + b, err := EncodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(b) != 0 { + t.Fatal("expected empty") + } + eas, err := DecodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(eas) != 0 { + t.Fatal("expected empty") + } +} + +// Test_SetFileEa makes sure that the test buffer is actually parsable by NtSetEaFile. 
+func Test_SetFileEa(t *testing.T) { + f, err := ioutil.TempFile("", "winio") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + defer f.Close() + ntdll := syscall.MustLoadDLL("ntdll.dll") + ntSetEaFile := ntdll.MustFindProc("NtSetEaFile") + var iosb [2]uintptr + r, _, _ := ntSetEaFile.Call(f.Fd(), uintptr(unsafe.Pointer(&iosb[0])), uintptr(unsafe.Pointer(&testEasEncoded[0])), uintptr(len(testEasEncoded))) + if r != 0 { + t.Fatalf("NtSetEaFile failed with %08x", r) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 00000000..4334ff1c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,307 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. 
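+// The exported MakeOpenFile wraps an existing handle in this type, for
+// example (h is assumed to be a handle opened for overlapped I/O):
+//
+//	f, err := MakeOpenFile(h)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer f.Close()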
+type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return makeWin32File(h) +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. 
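+// A successful zero-byte completion on a non-empty buffer, or a broken-pipe
+// error, is reported as io.EOF.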
+func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. + timeoutIO() + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 00000000..ada2fbab --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,61 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +const ( + fileBasicInfo = 0 + fileIDInfo = 0x12 +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uint32 + pad uint32 // padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. 
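+// A minimal round-trip sketch (illustrative; f is an already-open *os.File):
+//
+//	bi, err := GetFileBasicInfo(f)
+//	if err != nil {
+//		// handle error
+//	}
+//	// ... work that may touch timestamps or attributes ...
+//	if err := SetFileBasicInfo(f, bi); err != nil {
+//		// handle error
+//	}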
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/etw.go b/vendor/github.com/Microsoft/go-winio/internal/etw/etw.go new file mode 100644 index 00000000..88214fba --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/etw.go @@ -0,0 +1,15 @@ +// Package etw provides support for TraceLogging-based ETW (Event Tracing +// for Windows). TraceLogging is a format of ETW events that are self-describing +// (the event contains information on its own schema). This allows them to be +// decoded without needing a separate manifest with event information. The +// implementation here is based on the information found in +// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a +// set of C macros. +package etw + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go etw.go + +//sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister +//sys eventUnregister(providerHandle providerHandle) (win32err error) = advapi32.EventUnregister +//sys eventWriteTransfer(providerHandle providerHandle, descriptor *EventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) = advapi32.EventWriteTransfer +//sys eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) = advapi32.EventSetInformation diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/eventdata.go b/vendor/github.com/Microsoft/go-winio/internal/etw/eventdata.go new file mode 100644 index 00000000..32cf5681 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/eventdata.go @@ -0,0 +1,65 @@ +package etw + +import ( + "bytes" + "encoding/binary" +) + +// EventData maintains a buffer which builds up the data for an ETW event. It +// needs to be paired with EventMetadata which describes the event. +type EventData struct { + buffer bytes.Buffer +} + +// Bytes returns the raw binary data containing the event data. The returned +// value is not copied from the internal buffer, so it can be mutated by the +// EventData object after it is returned. +func (ed *EventData) Bytes() []byte { + return ed.buffer.Bytes() +} + +// WriteString appends a string, including the null terminator, to the buffer. 
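+// The value is written as its raw bytes followed by a single zero byte, which
+// is the layout StringField pairs with InTypeANSIString.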
+func (ed *EventData) WriteString(data string) { + ed.buffer.WriteString(data) + ed.buffer.WriteByte(0) +} + +// WriteInt8 appends a int8 to the buffer. +func (ed *EventData) WriteInt8(value int8) { + ed.buffer.WriteByte(uint8(value)) +} + +// WriteInt16 appends a int16 to the buffer. +func (ed *EventData) WriteInt16(value int16) { + binary.Write(&ed.buffer, binary.LittleEndian, value) +} + +// WriteInt32 appends a int32 to the buffer. +func (ed *EventData) WriteInt32(value int32) { + binary.Write(&ed.buffer, binary.LittleEndian, value) +} + +// WriteInt64 appends a int64 to the buffer. +func (ed *EventData) WriteInt64(value int64) { + binary.Write(&ed.buffer, binary.LittleEndian, value) +} + +// WriteUint8 appends a uint8 to the buffer. +func (ed *EventData) WriteUint8(value uint8) { + ed.buffer.WriteByte(value) +} + +// WriteUint16 appends a uint16 to the buffer. +func (ed *EventData) WriteUint16(value uint16) { + binary.Write(&ed.buffer, binary.LittleEndian, value) +} + +// WriteUint32 appends a uint32 to the buffer. +func (ed *EventData) WriteUint32(value uint32) { + binary.Write(&ed.buffer, binary.LittleEndian, value) +} + +// WriteUint64 appends a uint64 to the buffer. +func (ed *EventData) WriteUint64(value uint64) { + binary.Write(&ed.buffer, binary.LittleEndian, value) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/eventdatadescriptor.go b/vendor/github.com/Microsoft/go-winio/internal/etw/eventdatadescriptor.go new file mode 100644 index 00000000..8b0ad481 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/eventdatadescriptor.go @@ -0,0 +1,29 @@ +package etw + +import ( + "unsafe" +) + +type eventDataDescriptorType uint8 + +const ( + eventDataDescriptorTypeUserData eventDataDescriptorType = iota + eventDataDescriptorTypeEventMetadata + eventDataDescriptorTypeProviderMetadata +) + +type eventDataDescriptor struct { + ptr ptr64 + size uint32 + dataType eventDataDescriptorType + reserved1 uint8 + reserved2 uint16 +} + +func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor { + return eventDataDescriptor{ + ptr: ptr64{ptr: unsafe.Pointer(&buffer[0])}, + size: uint32(len(buffer)), + dataType: dataType, + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/eventdescriptor.go b/vendor/github.com/Microsoft/go-winio/internal/etw/eventdescriptor.go new file mode 100644 index 00000000..23980b38 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/eventdescriptor.go @@ -0,0 +1,67 @@ +package etw + +// Channel represents the ETW logging channel that is used. It can be used by +// event consumers to give an event special treatment. +type Channel uint8 + +const ( + // ChannelTraceLogging is the default channel for TraceLogging events. It is + // not required to be used for TraceLogging, but will prevent decoding + // issues for these events on older operating systems. + ChannelTraceLogging Channel = 11 +) + +// Level represents the ETW logging level. There are several predefined levels +// that are commonly used, but technically anything from 0-255 is allowed. +// Lower levels indicate more important events, and 0 indicates an event that +// will always be collected. +type Level uint8 + +// Predefined ETW log levels. +const ( + LevelAlways Level = iota + LevelCritical + LevelError + LevelWarning + LevelInfo + LevelVerbose +) + +// EventDescriptor represents various metadata for an ETW event. 
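+// For example (the identity value is illustrative):
+//
+//	d := NewEventDescriptor() // ChannelTraceLogging, LevelVerbose by default
+//	d.SetIdentity(0x010003)   // event id 3, version 1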
+type EventDescriptor struct { + id uint16 + version uint8 + Channel Channel + Level Level + Opcode uint8 + Task uint16 + Keyword uint64 +} + +// NewEventDescriptor returns an EventDescriptor initialized for use with +// TraceLogging. +func NewEventDescriptor() *EventDescriptor { + // Standard TraceLogging events default to the TraceLogging channel, and + // verbose level. + return &EventDescriptor{ + Channel: ChannelTraceLogging, + Level: LevelVerbose, + } +} + +// Identity returns the identity of the event. If the identity is not 0, it +// should uniquely identify the other event metadata (contained in +// EventDescriptor, and field metadata). Only the lower 24 bits of this value +// are relevant. +func (ed *EventDescriptor) Identity() uint32 { + return (uint32(ed.version) << 16) | uint32(ed.id) +} + +// SetIdentity sets the identity of the event. If the identity is not 0, it +// should uniquely identify the other event metadata (contained in +// EventDescriptor, and field metadata). Only the lower 24 bits of this value +// are relevant. +func (ed *EventDescriptor) SetIdentity(identity uint32) { + ed.id = uint16(identity) + ed.version = uint8(identity >> 16) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/eventmetadata.go b/vendor/github.com/Microsoft/go-winio/internal/etw/eventmetadata.go new file mode 100644 index 00000000..e97ede03 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/eventmetadata.go @@ -0,0 +1,177 @@ +package etw + +import ( + "bytes" + "encoding/binary" +) + +// InType indicates the type of data contained in the ETW event. +type InType byte + +// Various InType definitions for TraceLogging. These must match the definitions +// found in TraceLoggingProvider.h in the Windows SDK. +const ( + InTypeNull InType = iota + InTypeUnicodeString + InTypeANSIString + InTypeInt8 + InTypeUint8 + InTypeInt16 + InTypeUint16 + InTypeInt32 + InTypeUint32 + InTypeInt64 + InTypeUint64 + InTypeFloat + InTypeDouble + InTypeBool32 + InTypeBinary + InTypeGUID + InTypePointerUnsupported + InTypeFileTime + InTypeSystemTime + InTypeSID + InTypeHexInt32 + InTypeHexInt64 + InTypeCountedString + InTypeCountedANSIString + InTypeStruct + InTypeCountedBinary + InTypeCountedArray InType = 32 + InTypeArray InType = 64 +) + +// OutType specifies a hint to the event decoder for how the value should be +// formatted. +type OutType byte + +// Various OutType definitions for TraceLogging. These must match the +// definitions found in TraceLoggingProvider.h in the Windows SDK. +const ( + // OutTypeDefault indicates that the default formatting for the InType will + // be used by the event decoder. + OutTypeDefault OutType = iota + OutTypeNoPrint + OutTypeString + OutTypeBoolean + OutTypeHex + OutTypePID + OutTypeTID + OutTypePort + OutTypeIPv4 + OutTypeIPv6 + OutTypeSocketAddress + OutTypeXML + OutTypeJSON + OutTypeWin32Error + OutTypeNTStatus + OutTypeHResult + OutTypeFileTime + OutTypeSigned + OutTypeUnsigned + OutTypeUTF8 OutType = 35 + OutTypePKCS7WithTypeInfo OutType = 36 + OutTypeCodePointer OutType = 37 + OutTypeDateTimeUTC OutType = 38 +) + +// EventMetadata maintains a buffer which builds up the metadata for an ETW +// event. It needs to be paired with EventData which describes the event. +type EventMetadata struct { + buffer bytes.Buffer +} + +// Bytes returns the raw binary data containing the event metadata. Before being +// returned, the current size of the buffer is written to the start of the +// buffer. 
The returned value is not copied from the internal buffer, so it can +// be mutated by the EventMetadata object after it is returned. +func (em *EventMetadata) Bytes() []byte { + // Finalize the event metadata buffer by filling in the buffer length at the + // beginning. + binary.LittleEndian.PutUint16(em.buffer.Bytes(), uint16(em.buffer.Len())) + return em.buffer.Bytes() +} + +// WriteEventHeader writes the metadata for the start of an event to the buffer. +// This specifies the event name and tags. +func (em *EventMetadata) WriteEventHeader(name string, tags uint32) { + binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder + em.writeTags(tags) + em.buffer.WriteString(name) + em.buffer.WriteByte(0) // Null terminator for name +} + +func (em *EventMetadata) writeField(name string, inType InType, outType OutType, tags uint32, arrSize uint16) { + em.buffer.WriteString(name) + em.buffer.WriteByte(0) // Null terminator for name + + if outType == OutTypeDefault && tags == 0 { + em.buffer.WriteByte(byte(inType)) + } else { + em.buffer.WriteByte(byte(inType | 128)) + if tags == 0 { + em.buffer.WriteByte(byte(outType)) + } else { + em.buffer.WriteByte(byte(outType | 128)) + em.writeTags(tags) + } + } + + if arrSize != 0 { + binary.Write(&em.buffer, binary.LittleEndian, arrSize) + } +} + +// writeTags writes out the tags value to the event metadata. Tags is a 28-bit +// value, interpreted as bit flags, which are only relevant to the event +// consumer. The event consumer may choose to attribute special meaning to tags +// (e.g. 0x4 could mean the field contains PII). Tags are written as a series of +// bytes, each containing 7 bits of tag value, with the high bit set if there is +// more tag data in the following byte. This allows for a more compact +// representation when not all of the tag bits are needed. +func (em *EventMetadata) writeTags(tags uint32) { + // Only use the top 28 bits of the tags value. + tags &= 0xfffffff + + for { + // Tags are written with the most significant bits (e.g. 21-27) first. + val := tags >> 21 + + if tags&0x1fffff == 0 { + // If there is no more data to write after this, write this value + // without the high bit set, and return. + em.buffer.WriteByte(byte(val & 0x7f)) + return + } + + em.buffer.WriteByte(byte(val | 0x80)) + + tags <<= 7 + } +} + +// WriteField writes the metadata for a simple field to the buffer. +func (em *EventMetadata) WriteField(name string, inType InType, outType OutType, tags uint32) { + em.writeField(name, inType, outType, tags, 0) +} + +// WriteArray writes the metadata for an array field to the buffer. The number +// of elements in the array must be written as a uint16 in the event data, +// immediately preceeding the event data. +func (em *EventMetadata) WriteArray(name string, inType InType, outType OutType, tags uint32) { + em.writeField(name, inType|InTypeArray, outType, tags, 0) +} + +// WriteCountedArray writes the metadata for an array field to the buffer. The +// size of a counted array is fixed, and the size is written into the metadata +// directly. +func (em *EventMetadata) WriteCountedArray(name string, count uint16, inType InType, outType OutType, tags uint32) { + em.writeField(name, inType|InTypeCountedArray, outType, tags, count) +} + +// WriteStruct writes the metadata for a nested struct to the buffer. The struct +// contains the next N fields in the metadata, where N is specified by the +// fieldCount argument. 
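+// For example, a two-member struct could be declared as (field names are
+// illustrative; the matching values must be written to the paired EventData):
+//
+//	em.WriteStruct("addr", 2, 0)
+//	em.WriteField("host", InTypeANSIString, OutTypeUTF8, 0)
+//	em.WriteField("port", InTypeUint16, OutTypeDefault, 0)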
+func (em *EventMetadata) WriteStruct(name string, fieldCount uint8, tags uint32) { + em.writeField(name, InTypeStruct, OutType(fieldCount), tags, 0) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/eventopt.go b/vendor/github.com/Microsoft/go-winio/internal/etw/eventopt.go new file mode 100644 index 00000000..2c82eddc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/eventopt.go @@ -0,0 +1,63 @@ +package etw + +import ( + "golang.org/x/sys/windows" +) + +type eventOptions struct { + descriptor *EventDescriptor + activityID *windows.GUID + relatedActivityID *windows.GUID + tags uint32 +} + +// EventOpt defines the option function type that can be passed to +// Provider.WriteEvent to specify general event options, such as level and +// keyword. +type EventOpt func(options *eventOptions) + +// WithEventOpts returns the variadic arguments as a single slice. +func WithEventOpts(opts ...EventOpt) []EventOpt { + return opts +} + +// WithLevel specifies the level of the event to be written. +func WithLevel(level Level) EventOpt { + return func(options *eventOptions) { + options.descriptor.Level = level + } +} + +// WithKeyword specifies the keywords of the event to be written. Multiple uses +// of this option are OR'd together. +func WithKeyword(keyword uint64) EventOpt { + return func(options *eventOptions) { + options.descriptor.Keyword |= keyword + } +} + +func WithChannel(channel Channel) EventOpt { + return func(options *eventOptions) { + options.descriptor.Channel = channel + } +} + +// WithTags specifies the tags of the event to be written. Tags is a 28-bit +// value (top 4 bits are ignored) which are interpreted by the event consumer. +func WithTags(newTags uint32) EventOpt { + return func(options *eventOptions) { + options.tags |= newTags + } +} + +func WithActivityID(activityID *windows.GUID) EventOpt { + return func(options *eventOptions) { + options.activityID = activityID + } +} + +func WithRelatedActivityID(activityID *windows.GUID) EventOpt { + return func(options *eventOptions) { + options.relatedActivityID = activityID + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/fieldopt.go b/vendor/github.com/Microsoft/go-winio/internal/etw/fieldopt.go new file mode 100644 index 00000000..5d5b4254 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/fieldopt.go @@ -0,0 +1,379 @@ +package etw + +import ( + "math" + "unsafe" +) + +// FieldOpt defines the option function type that can be passed to +// Provider.WriteEvent to add fields to the event. +type FieldOpt func(em *EventMetadata, ed *EventData) + +// WithFields returns the variadic arguments as a single slice. +func WithFields(opts ...FieldOpt) []FieldOpt { + return opts +} + +// BoolField adds a single bool field to the event. +func BoolField(name string, value bool) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeUint8, OutTypeBoolean, 0) + bool8 := uint8(0) + if value { + bool8 = uint8(1) + } + ed.WriteUint8(bool8) + } +} + +// BoolArray adds an array of bool to the event. +func BoolArray(name string, values []bool) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeUint8, OutTypeBoolean, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + bool8 := uint8(0) + if v { + bool8 = uint8(1) + } + ed.WriteUint8(bool8) + } + } +} + +// StringField adds a single string field to the event. 
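+
+// A usage sketch for illustration; not part of the upstream go-winio source,
+// and the example name is hypothetical. The field helpers compose into a
+// single slice via WithFields, which is the shape Provider.WriteEvent expects
+// for its field options.
+func exampleFieldOpts() []FieldOpt {
+	return WithFields(
+		BoolField("enabled", true),
+		StringField("name", "demo"),
+		Int64Array("ids", []int64{1, 2, 3}),
+	)
+}
+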
+func StringField(name string, value string) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeANSIString, OutTypeUTF8, 0) + ed.WriteString(value) + } +} + +// StringArray adds an array of string to the event. +func StringArray(name string, values []string) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeANSIString, OutTypeUTF8, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteString(v) + } + } +} + +// IntField adds a single int field to the event. +func IntField(name string, value int) FieldOpt { + switch unsafe.Sizeof(value) { + case 4: + return Int32Field(name, int32(value)) + case 8: + return Int64Field(name, int64(value)) + default: + panic("Unsupported int size") + } +} + +// IntArray adds an array of int to the event. +func IntArray(name string, values []int) FieldOpt { + inType := InTypeNull + var writeItem func(*EventData, int) + switch unsafe.Sizeof(values[0]) { + case 4: + inType = InTypeInt32 + writeItem = func(ed *EventData, item int) { ed.WriteInt32(int32(item)) } + case 8: + inType = InTypeInt64 + writeItem = func(ed *EventData, item int) { ed.WriteInt64(int64(item)) } + default: + panic("Unsupported int size") + } + + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, inType, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + writeItem(ed, v) + } + } +} + +// Int8Field adds a single int8 field to the event. +func Int8Field(name string, value int8) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeInt8, OutTypeDefault, 0) + ed.WriteInt8(value) + } +} + +// Int8Array adds an array of int8 to the event. +func Int8Array(name string, values []int8) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeInt8, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteInt8(v) + } + } +} + +// Int16Field adds a single int16 field to the event. +func Int16Field(name string, value int16) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeInt16, OutTypeDefault, 0) + ed.WriteInt16(value) + } +} + +// Int16Array adds an array of int16 to the event. +func Int16Array(name string, values []int16) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeInt16, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteInt16(v) + } + } +} + +// Int32Field adds a single int32 field to the event. +func Int32Field(name string, value int32) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeInt32, OutTypeDefault, 0) + ed.WriteInt32(value) + } +} + +// Int32Array adds an array of int32 to the event. +func Int32Array(name string, values []int32) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeInt32, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteInt32(v) + } + } +} + +// Int64Field adds a single int64 field to the event. +func Int64Field(name string, value int64) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeInt64, OutTypeDefault, 0) + ed.WriteInt64(value) + } +} + +// Int64Array adds an array of int64 to the event. 
+func Int64Array(name string, values []int64) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeInt64, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteInt64(v) + } + } +} + +// UintField adds a single uint field to the event. +func UintField(name string, value uint) FieldOpt { + switch unsafe.Sizeof(value) { + case 4: + return Uint32Field(name, uint32(value)) + case 8: + return Uint64Field(name, uint64(value)) + default: + panic("Unsupported uint size") + } +} + +// UintArray adds an array of uint to the event. +func UintArray(name string, values []uint) FieldOpt { + inType := InTypeNull + var writeItem func(*EventData, uint) + switch unsafe.Sizeof(values[0]) { + case 4: + inType = InTypeUint32 + writeItem = func(ed *EventData, item uint) { ed.WriteUint32(uint32(item)) } + case 8: + inType = InTypeUint64 + writeItem = func(ed *EventData, item uint) { ed.WriteUint64(uint64(item)) } + default: + panic("Unsupported uint size") + } + + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, inType, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + writeItem(ed, v) + } + } +} + +// Uint8Field adds a single uint8 field to the event. +func Uint8Field(name string, value uint8) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeUint8, OutTypeDefault, 0) + ed.WriteUint8(value) + } +} + +// Uint8Array adds an array of uint8 to the event. +func Uint8Array(name string, values []uint8) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeUint8, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteUint8(v) + } + } +} + +// Uint16Field adds a single uint16 field to the event. +func Uint16Field(name string, value uint16) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeUint16, OutTypeDefault, 0) + ed.WriteUint16(value) + } +} + +// Uint16Array adds an array of uint16 to the event. +func Uint16Array(name string, values []uint16) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeUint16, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteUint16(v) + } + } +} + +// Uint32Field adds a single uint32 field to the event. +func Uint32Field(name string, value uint32) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeUint32, OutTypeDefault, 0) + ed.WriteUint32(value) + } +} + +// Uint32Array adds an array of uint32 to the event. +func Uint32Array(name string, values []uint32) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeUint32, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteUint32(v) + } + } +} + +// Uint64Field adds a single uint64 field to the event. +func Uint64Field(name string, value uint64) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeUint64, OutTypeDefault, 0) + ed.WriteUint64(value) + } +} + +// Uint64Array adds an array of uint64 to the event. +func Uint64Array(name string, values []uint64) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeUint64, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteUint64(v) + } + } +} + +// UintptrField adds a single uintptr field to the event. 
+func UintptrField(name string, value uintptr) FieldOpt { + inType := InTypeNull + var writeItem func(*EventData, uintptr) + switch unsafe.Sizeof(value) { + case 4: + inType = InTypeHexInt32 + writeItem = func(ed *EventData, item uintptr) { ed.WriteUint32(uint32(item)) } + case 8: + inType = InTypeHexInt64 + writeItem = func(ed *EventData, item uintptr) { ed.WriteUint64(uint64(item)) } + default: + panic("Unsupported uintptr size") + } + + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, inType, OutTypeDefault, 0) + writeItem(ed, value) + } +} + +// UintptrArray adds an array of uintptr to the event. +func UintptrArray(name string, values []uintptr) FieldOpt { + inType := InTypeNull + var writeItem func(*EventData, uintptr) + switch unsafe.Sizeof(values[0]) { + case 4: + inType = InTypeHexInt32 + writeItem = func(ed *EventData, item uintptr) { ed.WriteUint32(uint32(item)) } + case 8: + inType = InTypeHexInt64 + writeItem = func(ed *EventData, item uintptr) { ed.WriteUint64(uint64(item)) } + default: + panic("Unsupported uintptr size") + } + + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, inType, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + writeItem(ed, v) + } + } +} + +// Float32Field adds a single float32 field to the event. +func Float32Field(name string, value float32) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeFloat, OutTypeDefault, 0) + ed.WriteUint32(math.Float32bits(value)) + } +} + +// Float32Array adds an array of float32 to the event. +func Float32Array(name string, values []float32) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeFloat, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteUint32(math.Float32bits(v)) + } + } +} + +// Float64Field adds a single float64 field to the event. +func Float64Field(name string, value float64) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteField(name, InTypeDouble, OutTypeDefault, 0) + ed.WriteUint64(math.Float64bits(value)) + } +} + +// Float64Array adds an array of float64 to the event. +func Float64Array(name string, values []float64) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteArray(name, InTypeDouble, OutTypeDefault, 0) + ed.WriteUint16(uint16(len(values))) + for _, v := range values { + ed.WriteUint64(math.Float64bits(v)) + } + } +} + +// Struct adds a nested struct to the event, the FieldOpts in the opts argument +// are used to specify the fields of the struct. +func Struct(name string, opts ...FieldOpt) FieldOpt { + return func(em *EventMetadata, ed *EventData) { + em.WriteStruct(name, uint8(len(opts)), 0) + for _, opt := range opts { + opt(em, ed) + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/provider.go b/vendor/github.com/Microsoft/go-winio/internal/etw/provider.go new file mode 100644 index 00000000..452c860f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/provider.go @@ -0,0 +1,279 @@ +package etw + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "fmt" + "strings" + "unicode/utf16" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Provider represents an ETW event provider. It is identified by a provider +// name and ID (GUID), which should always have a 1:1 mapping to each other +// (e.g. don't use multiple provider names with the same ID, or vice versa). 
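+
+// A usage sketch for illustration; not part of the upstream go-winio source,
+// and the example name and provider name are hypothetical. To keep the
+// name/ID pairing 1:1 when registering with an explicit ID, derive the ID
+// from the provider name.
+func exampleNewProviderWithID() (*Provider, error) {
+	name := "Contoso.ExampleProvider"
+	return NewProviderWithID(name, providerIDFromName(name), nil)
+}
+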
+type Provider struct { + ID *windows.GUID + handle providerHandle + metadata []byte + callback EnableCallback + index uint + enabled bool + level Level + keywordAny uint64 + keywordAll uint64 +} + +// String returns the `provider`.ID as a string +func (provider *Provider) String() string { + data1 := make([]byte, 4) + binary.BigEndian.PutUint32(data1, provider.ID.Data1) + data2 := make([]byte, 2) + binary.BigEndian.PutUint16(data2, provider.ID.Data2) + data3 := make([]byte, 2) + binary.BigEndian.PutUint16(data3, provider.ID.Data3) + return fmt.Sprintf( + "%s-%s-%s-%s-%s", + hex.EncodeToString(data1), + hex.EncodeToString(data2), + hex.EncodeToString(data3), + hex.EncodeToString(provider.ID.Data4[:2]), + hex.EncodeToString(provider.ID.Data4[2:])) +} + +type providerHandle windows.Handle + +// ProviderState informs the provider EnableCallback what action is being +// performed. +type ProviderState uint32 + +const ( + // ProviderStateDisable indicates the provider is being disabled. + ProviderStateDisable ProviderState = iota + // ProviderStateEnable indicates the provider is being enabled. + ProviderStateEnable + // ProviderStateCaptureState indicates the provider is having its current + // state snap-shotted. + ProviderStateCaptureState +) + +type eventInfoClass uint32 + +const ( + eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota + eventInfoClassProviderSetReserved1 + eventInfoClassProviderSetTraits + eventInfoClassProviderUseDescriptorType +) + +// EnableCallback is the form of the callback function that receives provider +// enable/disable notifications from ETW. +type EnableCallback func(*windows.GUID, ProviderState, Level, uint64, uint64, uintptr) + +func providerCallback(sourceID *windows.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) { + provider := providers.getProvider(uint(i)) + + switch state { + case ProviderStateDisable: + provider.enabled = false + case ProviderStateEnable: + provider.enabled = true + provider.level = level + provider.keywordAny = matchAnyKeyword + provider.keywordAll = matchAllKeyword + } + + if provider.callback != nil { + provider.callback(sourceID, state, level, matchAnyKeyword, matchAllKeyword, filterData) + } +} + +// providerCallbackAdapter acts as the first-level callback from the C/ETW side +// for provider notifications. Because Go has trouble with callback arguments of +// different size, it has only pointer-sized arguments, which are then cast to +// the appropriate types when calling providerCallback. +func providerCallbackAdapter(sourceID *windows.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr { + providerCallback(sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i) + return 0 +} + +// providerIDFromName generates a provider ID based on the provider name. It +// uses the same algorithm as used by .NET's EventSource class, which is based +// on RFC 4122. 
More information on the algorithm can be found here: +// https://blogs.msdn.microsoft.com/dcook/2015/09/08/etw-provider-names-and-guids/ +// The algorithm is roughly: +// Hash = Sha1(namespace + arg.ToUpper().ToUtf16be()) +// Guid = Hash[0..15], with Hash[7] tweaked according to RFC 4122 +func providerIDFromName(name string) *windows.GUID { + buffer := sha1.New() + + namespace := []byte{0x48, 0x2C, 0x2D, 0xB2, 0xC3, 0x90, 0x47, 0xC8, 0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB} + buffer.Write(namespace) + + binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name)))) + + sum := buffer.Sum(nil) + sum[7] = (sum[7] & 0xf) | 0x50 + + return &windows.GUID{ + Data1: binary.LittleEndian.Uint32(sum[0:4]), + Data2: binary.LittleEndian.Uint16(sum[4:6]), + Data3: binary.LittleEndian.Uint16(sum[6:8]), + Data4: [8]byte{sum[8], sum[9], sum[10], sum[11], sum[12], sum[13], sum[14], sum[15]}, + } +} + +// NewProvider creates and registers a new ETW provider. The provider ID is +// generated based on the provider name. +func NewProvider(name string, callback EnableCallback) (provider *Provider, err error) { + return NewProviderWithID(name, providerIDFromName(name), callback) +} + +// NewProviderWithID creates and registers a new ETW provider, allowing the +// provider ID to be manually specified. This is most useful when there is an +// existing provider ID that must be used to conform to existing diagnostic +// infrastructure. +func NewProviderWithID(name string, id *windows.GUID, callback EnableCallback) (provider *Provider, err error) { + providerCallbackOnce.Do(func() { + globalProviderCallback = windows.NewCallback(providerCallbackAdapter) + }) + + provider = providers.newProvider() + defer func() { + if err != nil { + providers.removeProvider(provider) + } + }() + provider.ID = id + provider.callback = callback + + if err := eventRegister(provider.ID, globalProviderCallback, uintptr(provider.index), &provider.handle); err != nil { + return nil, err + } + + metadata := &bytes.Buffer{} + binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later) + metadata.WriteString(name) + metadata.WriteByte(0) // Null terminator for name + binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer + provider.metadata = metadata.Bytes() + + if err := eventSetInformation( + provider.handle, + eventInfoClassProviderSetTraits, + uintptr(unsafe.Pointer(&provider.metadata[0])), + uint32(len(provider.metadata))); err != nil { + + return nil, err + } + + return provider, nil +} + +// Close unregisters the provider. +func (provider *Provider) Close() error { + providers.removeProvider(provider) + return eventUnregister(provider.handle) +} + +// IsEnabled calls IsEnabledForLevelAndKeywords with LevelAlways and all +// keywords set. +func (provider *Provider) IsEnabled() bool { + return provider.IsEnabledForLevelAndKeywords(LevelAlways, ^uint64(0)) +} + +// IsEnabledForLevel calls IsEnabledForLevelAndKeywords with the specified level +// and all keywords set. +func (provider *Provider) IsEnabledForLevel(level Level) bool { + return provider.IsEnabledForLevelAndKeywords(level, ^uint64(0)) +} + +// IsEnabledForLevelAndKeywords allows event producer code to check if there are +// any event sessions that are interested in an event, based on the event level +// and keywords. 
Although this check happens automatically in the ETW +// infrastructure, it can be useful to check if an event will actually be +// consumed before doing expensive work to build the event data. +func (provider *Provider) IsEnabledForLevelAndKeywords(level Level, keywords uint64) bool { + if !provider.enabled { + return false + } + + // ETW automatically sets the level to 255 if it is specified as 0, so we + // don't need to worry about the level=0 (all events) case. + if level > provider.level { + return false + } + + if keywords != 0 && (keywords&provider.keywordAny == 0 || keywords&provider.keywordAll != provider.keywordAll) { + return false + } + + return true +} + +// WriteEvent writes a single ETW event from the provider. The event is +// constructed based on the EventOpt and FieldOpt values that are passed as +// opts. +func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpts []FieldOpt) error { + options := eventOptions{descriptor: NewEventDescriptor()} + em := &EventMetadata{} + ed := &EventData{} + + // We need to evaluate the EventOpts first since they might change tags, and + // we write out the tags before evaluating FieldOpts. + for _, opt := range eventOpts { + opt(&options) + } + + if !provider.IsEnabledForLevelAndKeywords(options.descriptor.Level, options.descriptor.Keyword) { + return nil + } + + em.WriteEventHeader(name, options.tags) + + for _, opt := range fieldOpts { + opt(em, ed) + } + + // Don't pass a data blob if there is no event data. There will always be + // event metadata (e.g. for the name) so we don't need to do this check for + // the metadata. + dataBlobs := [][]byte{} + if len(ed.Bytes()) > 0 { + dataBlobs = [][]byte{ed.Bytes()} + } + + return provider.WriteEventRaw(options.descriptor, nil, nil, [][]byte{em.Bytes()}, dataBlobs) +} + +// WriteEventRaw writes a single ETW event from the provider. This function is +// less abstracted than WriteEvent, and presents a fairly direct interface to +// the event writing functionality. It expects a series of event metadata and +// event data blobs to be passed in, which must conform to the TraceLogging +// schema. The functions on EventMetadata and EventData can help with creating +// these blobs. The blobs of each type are effectively concatenated together by +// the ETW infrastructure. 
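+
+// A usage sketch for illustration; not part of the upstream go-winio source,
+// and the example name is hypothetical. It shows the metadata blob and data
+// blob that WriteEventRaw expects being built in parallel with EventMetadata
+// and EventData before being handed to ETW.
+func exampleWriteEventRaw(p *Provider) error {
+	em := &EventMetadata{}
+	ed := &EventData{}
+	em.WriteEventHeader("RawEvent", 0)
+	em.WriteField("Count", InTypeUint32, OutTypeDefault, 0)
+	ed.WriteUint32(7)
+	return p.WriteEventRaw(NewEventDescriptor(), nil, nil, [][]byte{em.Bytes()}, [][]byte{ed.Bytes()})
+}
+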
+func (provider *Provider) WriteEventRaw( + descriptor *EventDescriptor, + activityID *windows.GUID, + relatedActivityID *windows.GUID, + metadataBlobs [][]byte, + dataBlobs [][]byte) error { + + dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs)) + dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount) + + dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata)) + for _, blob := range metadataBlobs { + dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob)) + } + for _, blob := range dataBlobs { + dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob)) + } + + return eventWriteTransfer(provider.handle, descriptor, activityID, relatedActivityID, dataDescriptorCount, &dataDescriptors[0]) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/providerglobal.go b/vendor/github.com/Microsoft/go-winio/internal/etw/providerglobal.go new file mode 100644 index 00000000..6c7331d9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/providerglobal.go @@ -0,0 +1,52 @@ +package etw + +import ( + "sync" +) + +// Because the provider callback function needs to be able to access the +// provider data when it is invoked by ETW, we need to keep provider data stored +// in a global map based on an index. The index is passed as the callback +// context to ETW. +type providerMap struct { + m map[uint]*Provider + i uint + lock sync.Mutex + once sync.Once +} + +var providers = providerMap{ + m: make(map[uint]*Provider), +} + +func (p *providerMap) newProvider() *Provider { + p.lock.Lock() + defer p.lock.Unlock() + + i := p.i + p.i++ + + provider := &Provider{ + index: i, + } + + p.m[i] = provider + return provider +} + +func (p *providerMap) removeProvider(provider *Provider) { + p.lock.Lock() + defer p.lock.Unlock() + + delete(p.m, provider.index) +} + +func (p *providerMap) getProvider(index uint) *Provider { + p.lock.Lock() + defer p.lock.Unlock() + + return p.m[index] +} + +var providerCallbackOnce sync.Once +var globalProviderCallback uintptr diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_32.go b/vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_32.go new file mode 100644 index 00000000..d1a76125 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_32.go @@ -0,0 +1,16 @@ +// +build 386 arm + +package etw + +import ( + "unsafe" +) + +// byteptr64 defines a struct containing a pointer. The struct is guaranteed to +// be 64 bits, regardless of the actual size of a pointer on the platform. This +// is intended for use with certain Windows APIs that expect a pointer as a +// ULONGLONG. +type ptr64 struct { + ptr unsafe.Pointer + _ uint32 +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_64.go b/vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_64.go new file mode 100644 index 00000000..b86c8f2b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/ptr64_64.go @@ -0,0 +1,15 @@ +// +build amd64 arm64 + +package etw + +import ( + "unsafe" +) + +// byteptr64 defines a struct containing a pointer. The struct is guaranteed to +// be 64 bits, regardless of the actual size of a pointer on the platform. This +// is intended for use with certain Windows APIs that expect a pointer as a +// ULONGLONG. 
+type ptr64 struct { + ptr unsafe.Pointer +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/sample/sample.go b/vendor/github.com/Microsoft/go-winio/internal/etw/sample/sample.go new file mode 100644 index 00000000..7766eca3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/sample/sample.go @@ -0,0 +1,91 @@ +// Shows a sample usage of the ETW logging package. +package main + +import ( + "bufio" + "fmt" + "os" + + "github.com/Microsoft/go-winio/internal/etw" + "github.com/sirupsen/logrus" + + "golang.org/x/sys/windows" +) + +func callback(sourceID *windows.GUID, state etw.ProviderState, level etw.Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr) { + fmt.Printf("Callback: isEnabled=%d, level=%d, matchAnyKeyword=%d\n", state, level, matchAnyKeyword) +} + +func main() { + provider, err := etw.NewProvider("TestProvider", callback) + + if err != nil { + logrus.Error(err) + return + } + defer func() { + if err := provider.Close(); err != nil { + logrus.Error(err) + } + }() + + fmt.Printf("Provider ID: %s\n", provider) + + reader := bufio.NewReader(os.Stdin) + + fmt.Println("Press enter to log events") + reader.ReadString('\n') + + // Write using high-level API. + if err := provider.WriteEvent( + "TestEvent", + etw.WithEventOpts( + etw.WithLevel(etw.LevelInfo), + etw.WithKeyword(0x140), + ), + etw.WithFields( + etw.StringField("TestField", "Foo"), + etw.StringField("TestField2", "Bar"), + etw.Struct("TestStruct", + etw.StringField("Field1", "Value1"), + etw.StringField("Field2", "Value2")), + etw.StringArray("TestArray", []string{ + "Item1", + "Item2", + "Item3", + "Item4", + "Item5", + })), + ); err != nil { + logrus.Error(err) + return + } + + // Write using low-level API. + descriptor := etw.NewEventDescriptor() + descriptor.Level = etw.LevelInfo + descriptor.Keyword = 0x140 + em := &etw.EventMetadata{} + ed := &etw.EventData{} + em.WriteEventHeader("TestEvent", 0) + em.WriteField("TestField", etw.InTypeANSIString, etw.OutTypeUTF8, 0) + ed.WriteString("Foo") + em.WriteField("TestField2", etw.InTypeANSIString, etw.OutTypeUTF8, 0) + ed.WriteString("Bar") + em.WriteStruct("TestStruct", 2, 0) + em.WriteField("Field1", etw.InTypeANSIString, etw.OutTypeUTF8, 0) + ed.WriteString("Value1") + em.WriteField("Field2", etw.InTypeANSIString, etw.OutTypeUTF8, 0) + ed.WriteString("Value2") + em.WriteArray("TestArray", etw.InTypeANSIString, etw.OutTypeDefault, 0) + ed.WriteUint16(5) + ed.WriteString("Item1") + ed.WriteString("Item2") + ed.WriteString("Item3") + ed.WriteString("Item4") + ed.WriteString("Item5") + if err := provider.WriteEventRaw(descriptor, nil, nil, [][]byte{em.Bytes()}, [][]byte{ed.Bytes()}); err != nil { + logrus.Error(err) + return + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/etw/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/etw/zsyscall_windows.go new file mode 100644 index 00000000..489a0f99 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/etw/zsyscall_windows.go @@ -0,0 +1,78 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package etw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procEventRegister = modadvapi32.NewProc("EventRegister") + procEventUnregister = modadvapi32.NewProc("EventUnregister") + procEventWriteTransfer = modadvapi32.NewProc("EventWriteTransfer") + procEventSetInformation = modadvapi32.NewProc("EventSetInformation") +) + +func eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) { + r0, _, _ := syscall.Syscall6(procEventRegister.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(callback), uintptr(callbackContext), uintptr(unsafe.Pointer(providerHandle)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func eventUnregister(providerHandle providerHandle) (win32err error) { + r0, _, _ := syscall.Syscall(procEventUnregister.Addr(), 1, uintptr(providerHandle), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func eventWriteTransfer(providerHandle providerHandle, descriptor *EventDescriptor, activityID *windows.GUID, relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) { + r0, _, _ := syscall.Syscall6(procEventWriteTransfer.Addr(), 6, uintptr(providerHandle), uintptr(unsafe.Pointer(descriptor)), uintptr(unsafe.Pointer(activityID)), uintptr(unsafe.Pointer(relatedActivityID)), uintptr(dataDescriptorCount), uintptr(unsafe.Pointer(dataDescriptors))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func eventSetInformation(providerHandle providerHandle, class eventInfoClass, information uintptr, length uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procEventSetInformation.Addr(), 4, uintptr(providerHandle), uintptr(class), uintptr(information), uintptr(length), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 00000000..d99eedb6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,421 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = 
LocalAlloc + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cPIPE_ACCESS_DUPLEX = 0x3 + cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + + cPIPE_UNLIMITED_INSTANCES = 255 + + cNMPWAIT_USE_DEFAULT_WAIT = 0 + cNMPWAIT_NOWAIT = 1 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 5 seconds. (We do not use WaitNamedPipe.) 
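+
+// A usage sketch for illustration; not part of the upstream go-winio source,
+// and the example name and pipe path are hypothetical. Passing a non-nil
+// timeout bounds how long DialPipe keeps retrying while the pipe is busy.
+func exampleDialPipe() error {
+	timeout := 250 * time.Millisecond
+	conn, err := DialPipe(`\\.\pipe\examplepipe`, &timeout)
+	if err != nil {
+		return err
+	}
+	return conn.Close()
+}
+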
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(time.Second * 2) + } + var err error + var h syscall.Handle + for { + h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != cERROR_PIPE_BUSY { + break + } + if time.Now().After(absTimeout) { + return nil, ErrTimeout + } + + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(time.Millisecond * 10) + } + if err != nil { + return nil, &os.PathError{Op: "open", Path: path, Err: err} + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + securityDescriptor []byte + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED + if first { + flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + } + + var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + if c.MessageMode { + mode |= cPIPE_TYPE_MESSAGE + } + + sa := &syscall.SecurityAttributes{} + sa.Length = uint32(unsafe.Sizeof(*sa)) + if securityDescriptor != nil { + len := uint32(len(securityDescriptor)) + sa.SecurityDescriptor = localAlloc(0, len) + defer localFree(sa.SecurityDescriptor) + copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) + } + h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. 
+ p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != cERROR_NO_DATA { + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size the input buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + // Create a client handle and connect it. This results in the pipe + // instance always existing, so that clients see ERROR_PIPE_BUSY + // rather than ERROR_FILE_NOT_FOUND. This ties the first instance + // up so that no other instances can be used. This would have been + // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe + // instead of CreateNamedPipe. (Apparently created named pipes are + // considered to be in listening state regardless of whether any + // active calls to ConnectNamedPipe are outstanding.) + h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != nil { + syscall.Close(h) + return nil, err + } + // Close the client handle. The server side of the instance will + // still be busy, leading to ERROR_PIPE_BUSY instead of + // ERROR_NOT_FOUND, as long as we don't close the server handle, + // or disconnect the client with DisconnectNamedPipe. 
+ syscall.Close(h2) + l := &win32PipeListener{ + firstHandle: h, + path: path, + securityDescriptor: sd, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe_test.go b/vendor/github.com/Microsoft/go-winio/pipe_test.go new file mode 100644 index 00000000..38692073 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe_test.go @@ -0,0 +1,516 @@ +package winio + +import ( + "bufio" + "bytes" + "io" + "net" + "os" + "sync" + "syscall" + "testing" + "time" + "unsafe" +) + +var testPipeName = `\\.\pipe\winiotestpipe` + +var aLongTimeAgo = time.Unix(1, 0) + +func TestDialUnknownFailsImmediately(t *testing.T) { + _, err := DialPipe(testPipeName, nil) + if err.(*os.PathError).Err != syscall.ENOENT { + t.Fatalf("expected ENOENT got %v", err) + } +} + +func TestDialListenerTimesOut(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + var d = time.Duration(10 * time.Millisecond) + _, err = DialPipe(testPipeName, &d) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func TestDialAccessDeniedWithRestrictedSD(t *testing.T) { + c := PipeConfig{ + SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)", + } + l, err := ListenPipe(testPipeName, &c) + if err != nil { + t.Fatal(err) + } + defer l.Close() + _, err = DialPipe(testPipeName, nil) + if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED { + t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err) + } +} + +func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) { + l, err := ListenPipe(testPipeName, cfg) + if err != nil { + return + } + defer l.Close() + + type response struct { + c net.Conn + err error + } + ch := make(chan response) + go func() { + c, err := l.Accept() + ch <- response{c, err} + }() + + c, err := DialPipe(testPipeName, nil) + if err != nil { + return + } + + r := <-ch + if err = r.err; err != nil { + c.Close() + return + } + + client = c + server = r.c + return +} + +func TestReadTimeout(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + + c.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) + + buf := make([]byte, 10) + _, err = c.Read(buf) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func server(l net.Listener, ch chan int) { + c, err := l.Accept() + if err != nil { + panic(err) + } + rw := 
bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) + s, err := rw.ReadString('\n') + if err != nil { + panic(err) + } + _, err = rw.WriteString("got " + s) + if err != nil { + panic(err) + } + err = rw.Flush() + if err != nil { + panic(err) + } + c.Close() + ch <- 1 +} + +func TestFullListenDialReadWrite(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + ch := make(chan int) + go server(l, ch) + + c, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) + _, err = rw.WriteString("hello world\n") + if err != nil { + t.Fatal(err) + } + err = rw.Flush() + if err != nil { + t.Fatal(err) + } + + s, err := rw.ReadString('\n') + if err != nil { + t.Fatal(err) + } + ms := "got hello world\n" + if s != ms { + t.Errorf("expected '%s', got '%s'", ms, s) + } + + <-ch +} + +func TestCloseAbortsListen(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + + ch := make(chan error) + go func() { + _, err := l.Accept() + ch <- err + }() + + time.Sleep(30 * time.Millisecond) + l.Close() + + err = <-ch + if err != ErrPipeListenerClosed { + t.Fatalf("expected ErrPipeListenerClosed, got %v", err) + } +} + +func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) { + b := make([]byte, 10) + w.Close() + n, err := r.Read(b) + if n > 0 { + t.Errorf("unexpected byte count %d", n) + } + if err != io.EOF { + t.Errorf("expected EOF: %v", err) + } +} + +func TestCloseClientEOFServer(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + ensureEOFOnClose(t, c, s) +} + +func TestCloseServerEOFClient(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + ensureEOFOnClose(t, s, c) +} + +func TestCloseWriteEOF(t *testing.T) { + cfg := &PipeConfig{ + MessageMode: true, + } + c, s, err := getConnection(cfg) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + + type closeWriter interface { + CloseWrite() error + } + + err = c.(closeWriter).CloseWrite() + if err != nil { + t.Fatal(err) + } + + b := make([]byte, 10) + _, err = s.Read(b) + if err != io.EOF { + t.Fatal(err) + } +} + +func TestAcceptAfterCloseFails(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + l.Close() + _, err = l.Accept() + if err != ErrPipeListenerClosed { + t.Fatalf("expected ErrPipeListenerClosed, got %v", err) + } +} + +func TestDialTimesOutByDefault(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + _, err = DialPipe(testPipeName, nil) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func TestTimeoutPendingRead(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + serverDone := make(chan struct{}) + + go func() { + s, err := l.Accept() + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + s.Close() + close(serverDone) + }() + + client, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + clientErr := make(chan error) + go func() { + buf := make([]byte, 10) + _, err = client.Read(buf) + clientErr <- err + }() + + time.Sleep(100 * time.Millisecond) // make *sure* the pipe is reading before we set the deadline 
+ client.SetReadDeadline(aLongTimeAgo) + + select { + case err = <-clientErr: + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out while waiting for read to cancel") + <-clientErr + } + <-serverDone +} + +func TestTimeoutPendingWrite(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + serverDone := make(chan struct{}) + + go func() { + s, err := l.Accept() + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + s.Close() + close(serverDone) + }() + + client, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + clientErr := make(chan error) + go func() { + _, err = client.Write([]byte("this should timeout")) + clientErr <- err + }() + + time.Sleep(100 * time.Millisecond) // make *sure* the pipe is writing before we set the deadline + client.SetWriteDeadline(aLongTimeAgo) + + select { + case err = <-clientErr: + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out while waiting for write to cancel") + <-clientErr + } + <-serverDone +} + +type CloseWriter interface { + CloseWrite() error +} + +func TestEchoWithMessaging(t *testing.T) { + c := PipeConfig{ + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + l, err := ListenPipe(testPipeName, &c) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + listenerDone := make(chan bool) + clientDone := make(chan bool) + go func() { + // server echo + conn, e := l.Accept() + if e != nil { + t.Fatal(e) + } + defer conn.Close() + + time.Sleep(500 * time.Millisecond) // make *sure* we don't begin to read before eof signal is sent + io.Copy(conn, conn) + conn.(CloseWriter).CloseWrite() + close(listenerDone) + }() + timeout := 1 * time.Second + client, err := DialPipe(testPipeName, &timeout) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + go func() { + // client read back + bytes := make([]byte, 2) + n, e := client.Read(bytes) + if e != nil { + t.Fatal(e) + } + if n != 2 { + t.Fatalf("expected 2 bytes, got %v", n) + } + close(clientDone) + }() + + payload := make([]byte, 2) + payload[0] = 0 + payload[1] = 1 + + n, err := client.Write(payload) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("expected 2 bytes, got %v", n) + } + client.(CloseWriter).CloseWrite() + <-listenerDone + <-clientDone +} + +func TestConnectRace(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + go func() { + for { + s, err := l.Accept() + if err == ErrPipeListenerClosed { + return + } + + if err != nil { + t.Fatal(err) + } + s.Close() + } + }() + + for i := 0; i < 1000; i++ { + c, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + c.Close() + } +} + +func TestMessageReadMode(t *testing.T) { + var wg sync.WaitGroup + defer wg.Wait() + + l, err := ListenPipe(testPipeName, &PipeConfig{MessageMode: true}) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + msg := ([]byte)("hello world") + + wg.Add(1) + go func() { + defer wg.Done() + s, err := l.Accept() + if err != nil { + t.Fatal(err) + } + _, err = s.Write(msg) + if err != nil { + t.Fatal(err) + } + s.Close() + }() + + c, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + 
} + defer c.Close() + + setNamedPipeHandleState := syscall.NewLazyDLL("kernel32.dll").NewProc("SetNamedPipeHandleState") + + p := c.(*win32MessageBytePipe) + mode := uint32(cPIPE_READMODE_MESSAGE) + if s, _, err := setNamedPipeHandleState.Call(uintptr(p.handle), uintptr(unsafe.Pointer(&mode)), 0, 0); s == 0 { + t.Fatal(err) + } + + ch := make([]byte, 1) + var vmsg []byte + for { + n, err := c.Read(ch) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if n != 1 { + t.Fatal("expected 1: ", n) + } + vmsg = append(vmsg, ch[0]) + } + if !bytes.Equal(msg, vmsg) { + t.Fatalf("expected %s: %s", msg, vmsg) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go new file mode 100644 index 00000000..fe0835b3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go @@ -0,0 +1,192 @@ +package etwlogrus + +import ( + "fmt" + "reflect" + + "github.com/Microsoft/go-winio/internal/etw" + "github.com/sirupsen/logrus" +) + +// Hook is a Logrus hook which logs received events to ETW. +type Hook struct { + provider *etw.Provider +} + +// NewHook registers a new ETW provider and returns a hook to log from it. +func NewHook(providerName string) (*Hook, error) { + hook := Hook{} + + provider, err := etw.NewProvider(providerName, nil) + if err != nil { + return nil, err + } + hook.provider = provider + + return &hook, nil +} + +// Levels returns the set of levels that this hook wants to receive log entries +// for. +func (h *Hook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.TraceLevel, + logrus.DebugLevel, + logrus.InfoLevel, + logrus.WarnLevel, + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} + +// Fire receives each Logrus entry as it is logged, and logs it to ETW. +func (h *Hook) Fire(e *logrus.Entry) error { + level := etw.Level(e.Level) + if !h.provider.IsEnabledForLevel(level) { + return nil + } + + // Reserve extra space for the message field. + fields := make([]etw.FieldOpt, 0, len(e.Data)+1) + + fields = append(fields, etw.StringField("Message", e.Message)) + + for k, v := range e.Data { + fields = append(fields, getFieldOpt(k, v)) + } + + // We could try to map Logrus levels to ETW levels, but we would lose some + // fidelity as there are fewer ETW levels. So instead we use the level + // directly. + return h.provider.WriteEvent( + "LogrusEntry", + etw.WithEventOpts(etw.WithLevel(level)), + fields) +} + +// Currently, we support logging basic builtin types (int, string, etc), slices +// of basic builtin types, error, types derived from the basic types (e.g. "type +// foo int"), and structs (recursively logging their fields). We do not support +// slices of derived types (e.g. "[]foo"). +// +// For types that we don't support, the value is formatted via fmt.Sprint, and +// we also log a message that the type is unsupported along with the formatted +// type. The intent of this is to make it easier to see which types are not +// supported in traces, so we can evaluate adding support for more types in the +// future. 
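+
+// A usage sketch for illustration; not part of the upstream go-winio source,
+// and the example name and field names are hypothetical. A derived integer
+// type falls through to the reflection path and is logged as its underlying
+// kind, while basic types and slices of basic types use the typed helpers
+// directly.
+func exampleGetFieldOpt() []etw.FieldOpt {
+	type seconds int64
+	return []etw.FieldOpt{
+		getFieldOpt("message", "hello"),
+		getFieldOpt("elapsed", seconds(42)),
+		getFieldOpt("samples", []float64{0.5, 1.5}),
+	}
+}
+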
+func getFieldOpt(k string, v interface{}) etw.FieldOpt { + switch v := v.(type) { + case bool: + return etw.BoolField(k, v) + case []bool: + return etw.BoolArray(k, v) + case string: + return etw.StringField(k, v) + case []string: + return etw.StringArray(k, v) + case int: + return etw.IntField(k, v) + case []int: + return etw.IntArray(k, v) + case int8: + return etw.Int8Field(k, v) + case []int8: + return etw.Int8Array(k, v) + case int16: + return etw.Int16Field(k, v) + case []int16: + return etw.Int16Array(k, v) + case int32: + return etw.Int32Field(k, v) + case []int32: + return etw.Int32Array(k, v) + case int64: + return etw.Int64Field(k, v) + case []int64: + return etw.Int64Array(k, v) + case uint: + return etw.UintField(k, v) + case []uint: + return etw.UintArray(k, v) + case uint8: + return etw.Uint8Field(k, v) + case []uint8: + return etw.Uint8Array(k, v) + case uint16: + return etw.Uint16Field(k, v) + case []uint16: + return etw.Uint16Array(k, v) + case uint32: + return etw.Uint32Field(k, v) + case []uint32: + return etw.Uint32Array(k, v) + case uint64: + return etw.Uint64Field(k, v) + case []uint64: + return etw.Uint64Array(k, v) + case uintptr: + return etw.UintptrField(k, v) + case []uintptr: + return etw.UintptrArray(k, v) + case float32: + return etw.Float32Field(k, v) + case []float32: + return etw.Float32Array(k, v) + case float64: + return etw.Float64Field(k, v) + case []float64: + return etw.Float64Array(k, v) + case error: + return etw.StringField(k, v.Error()) + default: + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool: + return getFieldOpt(k, rv.Bool()) + case reflect.Int: + return getFieldOpt(k, int(rv.Int())) + case reflect.Int8: + return getFieldOpt(k, int8(rv.Int())) + case reflect.Int16: + return getFieldOpt(k, int16(rv.Int())) + case reflect.Int32: + return getFieldOpt(k, int32(rv.Int())) + case reflect.Int64: + return getFieldOpt(k, int64(rv.Int())) + case reflect.Uint: + return getFieldOpt(k, uint(rv.Uint())) + case reflect.Uint8: + return getFieldOpt(k, uint8(rv.Uint())) + case reflect.Uint16: + return getFieldOpt(k, uint16(rv.Uint())) + case reflect.Uint32: + return getFieldOpt(k, uint32(rv.Uint())) + case reflect.Uint64: + return getFieldOpt(k, uint64(rv.Uint())) + case reflect.Uintptr: + return getFieldOpt(k, uintptr(rv.Uint())) + case reflect.Float32: + return getFieldOpt(k, float32(rv.Float())) + case reflect.Float64: + return getFieldOpt(k, float64(rv.Float())) + case reflect.String: + return getFieldOpt(k, rv.String()) + case reflect.Struct: + fields := make([]etw.FieldOpt, 0, rv.NumField()) + for i := 0; i < rv.NumField(); i++ { + field := rv.Field(i) + if field.CanInterface() { + fields = append(fields, getFieldOpt(k, field.Interface())) + } + } + return etw.Struct(k, fields...) + } + } + + return etw.StringField(k, fmt.Sprintf("(Unsupported: %T) %v", v, v)) +} + +// Close cleans up the hook and closes the ETW provider. 
+func (h *Hook) Close() error { + return h.provider.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook_test.go b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook_test.go new file mode 100644 index 00000000..3653cdc8 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook_test.go @@ -0,0 +1,126 @@ +package etwlogrus + +import ( + "github.com/Microsoft/go-winio/internal/etw" + "testing" +) + +func fireEvent(t *testing.T, p *etw.Provider, name string, value interface{}) { + if err := p.WriteEvent( + name, + nil, + etw.WithFields(getFieldOpt("Field", value))); err != nil { + + t.Fatal(err) + } +} + +// The purpose of this test is to log lots of different field types, to test the +// logic that converts them to ETW. Because we don't have a way to +// programatically validate the ETW events, this test has two main purposes: (1) +// validate nothing causes a panic while logging (2) allow manual validation that +// the data is logged correctly (through a tool like WPA). +func TestFieldLogging(t *testing.T) { + // Sample WPRP to collect this provider: + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // Start collection: + // wpr -start HookTest.wprp -filemode + // + // Stop collection: + // wpr -stop HookTest.etl + p, err := etw.NewProvider("HookTest", nil) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := p.Close(); err != nil { + t.Fatal(err) + } + }() + + fireEvent(t, p, "Bool", true) + fireEvent(t, p, "BoolSlice", []bool{true, false, true}) + fireEvent(t, p, "EmptyBoolSlice", []bool{}) + fireEvent(t, p, "String", "teststring") + fireEvent(t, p, "StringSlice", []string{"sstr1", "sstr2", "sstr3"}) + fireEvent(t, p, "EmptyStringSlice", []string{}) + fireEvent(t, p, "Int", int(1)) + fireEvent(t, p, "IntSlice", []int{2, 3, 4}) + fireEvent(t, p, "EmptyIntSlice", []int{}) + fireEvent(t, p, "Int8", int8(5)) + fireEvent(t, p, "Int8Slice", []int8{6, 7, 8}) + fireEvent(t, p, "EmptyInt8Slice", []int8{}) + fireEvent(t, p, "Int16", int16(9)) + fireEvent(t, p, "Int16Slice", []int16{10, 11, 12}) + fireEvent(t, p, "EmptyInt16Slice", []int16{}) + fireEvent(t, p, "Int32", int32(13)) + fireEvent(t, p, "Int32Slice", []int32{14, 15, 16}) + fireEvent(t, p, "EmptyInt32Slice", []int32{}) + fireEvent(t, p, "Int64", int64(17)) + fireEvent(t, p, "Int64Slice", []int64{18, 19, 20}) + fireEvent(t, p, "EmptyInt64Slice", []int64{}) + fireEvent(t, p, "Uint", uint(21)) + fireEvent(t, p, "UintSlice", []uint{22, 23, 24}) + fireEvent(t, p, "EmptyUintSlice", []uint{}) + fireEvent(t, p, "Uint8", uint8(25)) + fireEvent(t, p, "Uint8Slice", []uint8{26, 27, 28}) + fireEvent(t, p, "EmptyUint8Slice", []uint8{}) + fireEvent(t, p, "Uint16", uint16(29)) + fireEvent(t, p, "Uint16Slice", []uint16{30, 31, 32}) + fireEvent(t, p, "EmptyUint16Slice", []uint16{}) + fireEvent(t, p, "Uint32", uint32(33)) + fireEvent(t, p, "Uint32Slice", []uint32{34, 35, 36}) + fireEvent(t, p, "EmptyUint32Slice", []uint32{}) + fireEvent(t, p, "Uint64", uint64(37)) + fireEvent(t, p, "Uint64Slice", []uint64{38, 39, 40}) + fireEvent(t, p, "EmptyUint64Slice", []uint64{}) + fireEvent(t, p, "Uintptr", uintptr(41)) + fireEvent(t, p, "UintptrSlice", []uintptr{42, 43, 44}) + fireEvent(t, p, "EmptyUintptrSlice", []uintptr{}) + fireEvent(t, p, "Float32", float32(45.46)) + fireEvent(t, p, "Float32Slice", []float32{47.48, 49.50, 51.52}) + fireEvent(t, p, "EmptyFloat32Slice", []float32{}) + fireEvent(t, p, "Float64", float64(53.54)) + fireEvent(t, p, 
"Float64Slice", []float64{55.56, 57.58, 59.60}) + fireEvent(t, p, "EmptyFloat64Slice", []float64{}) + + type struct1 struct { + A float32 + priv int + B []uint + } + type struct2 struct { + A int + B int + } + type struct3 struct { + struct2 + A int + B string + priv string + C struct1 + D uint16 + } + // Unexported fields, and fields in embedded structs, should not log. + fireEvent(t, p, "Struct", struct3{struct2{-1, -2}, 1, "2s", "-3s", struct1{3.4, -4, []uint{5, 6, 7}}, 8}) +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 00000000..9c83d36f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,202 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. 
+func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. +func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + + p, _ := windows.GetCurrentProcess() + var token windows.Token + err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) + if err != nil { + return err + } + + defer token.Close() + return adjustPrivileges(token, privileges, action) +} + +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + for _, p := range privileges { + binary.Write(&b, binary.LittleEndian, p) + binary.Write(&b, binary.LittleEndian, action) + } + prevState := make([]byte, b.Len()) + reqSize := uint32(0) + success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) + if !success { + return err + } + if err == ERROR_NOT_ALL_ASSIGNED { + return &PrivilegeError{privileges} + } + return nil +} + +func getPrivilegeName(luid uint64) string { + var nameBuffer [256]uint16 + bufSize := uint32(len(nameBuffer)) + err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) + if err != nil { + return fmt.Sprintf("<unknown privilege %d>", luid) + } + + var displayNameBuffer [256]uint16 + displayBufSize := uint32(len(displayNameBuffer)) + var langID uint32 + err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) + if err != nil { + return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize]))) + } + + return string(utf16.Decode(displayNameBuffer[:displayBufSize])) +} + +func newThreadToken() (windows.Token, error) { + err := impersonateSelf(securityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/privileges_test.go b/vendor/github.com/Microsoft/go-winio/privileges_test.go new file mode 100644 index 00000000..5e94c48c --- /dev/null +++
b/vendor/github.com/Microsoft/go-winio/privileges_test.go @@ -0,0 +1,17 @@ +package winio + +import "testing" + +func TestRunWithUnavailablePrivilege(t *testing.T) { + err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil }) + if _, ok := err.(*PrivilegeError); err == nil || !ok { + t.Fatal("expected PrivilegeError") + } +} + +func TestRunWithPrivileges(t *testing.T) { + err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 00000000..fc1ee4d3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,128 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 00000000..db1b370a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = 
syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to including an aribtrary number of terminating NULs. + // Don't use it. + err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/sd_test.go b/vendor/github.com/Microsoft/go-winio/sd_test.go new file mode 100644 index 00000000..a33925c7 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd_test.go @@ -0,0 +1,26 @@ +package winio + +import "testing" + +func TestLookupInvalidSid(t *testing.T) { + _, err := LookupSidByName(".\\weoifjdsklfj") + aerr, ok := err.(*AccountLookupError) + if !ok || aerr.Err != cERROR_NONE_MAPPED { + t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) + } +} + +func TestLookupValidSid(t *testing.T) { + sid, err := LookupSidByName("Everyone") + if err != nil || sid != "S-1-1-0" { + t.Fatalf("expected S-1-1-0, got %s, %s", sid, err) + } +} + +func TestLookupEmptyNameFails(t *testing.T) { + _, err := LookupSidByName(".\\weoifjdsklfj") + aerr, ok := err.(*AccountLookupError) + if !ok || aerr.Err != cERROR_NONE_MAPPED { + t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 00000000..20d64cf4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go diff --git a/vendor/github.com/Microsoft/go-winio/tools/etw-provider-gen/main.go b/vendor/github.com/Microsoft/go-winio/tools/etw-provider-gen/main.go new file mode 100644 index 00000000..96b76aab --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/tools/etw-provider-gen/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/Microsoft/go-winio/internal/etw" +) + +func main() { + var pn = flag.String("provider-name", "", "The human readable ETW provider name to be converted into GUID format") + flag.Parse() + if pn == nil || *pn == "" { + fmt.Fprint(os.Stderr, "--provider-name is required") + os.Exit(1) + } + p, err := etw.NewProvider(*pn, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to convert provider-name: '%s' with err: '%s", *pn, err) + os.Exit(1) + } + defer p.Close() + fmt.Fprintf(os.Stdout, "%s", p) +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go new file mode 100644 index 00000000..93a9e33e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/mksyscall_windows.go @@ -0,0 +1,901 
@@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hard-coding unicode mode for VHD library. + +// +build ignore + +/* +mksyscall_windows generates windows system call bodies + +It parses all files specified on command line containing function +prototypes (like syscall_windows.go) and prints system call bodies +to standard output. + +The prototypes are marked by lines beginning with "//sys" and read +like func declarations if //sys is replaced by func, but: + +* The parameter lists must give a name for each argument. This + includes return parameters. + +* The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + +* If the return parameter is an error number, it must be named err. + +* If go func name needs to be different from it's winapi dll name, + the winapi name could be specified at the end, after "=" sign, like + //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA + +* Each function that returns err needs to supply a condition, that + return value of winapi will be tested against to detect failure. + This would set err to windows "last-error", otherwise it will be nil. + The value can be provided at end of //sys declaration, like + //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA + and is [failretval==0] by default. + +Usage: + mksyscall_windows [flags] [path ...] + +The flags are: + -output + Specify output file name (outputs to console if blank). + -trace + Generate print statement after every syscall. +*/ +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/format" + "go/parser" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "text/template" +) + +var ( + filename = flag.String("output", "", "output file name (standard output if omitted)") + printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") + systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory") +) + +func trim(s string) string { + return strings.Trim(s, " \t") +} + +var packageName string + +func packagename() string { + return packageName +} + +func syscalldot() string { + if packageName == "syscall" { + return "" + } + return "syscall." +} + +// Param is function parameter +type Param struct { + Name string + Type string + fn *Fn + tmpVarIdx int +} + +// tmpVar returns temp variable name that will be used to represent p during syscall. +func (p *Param) tmpVar() string { + if p.tmpVarIdx < 0 { + p.tmpVarIdx = p.fn.curTmpVarIdx + p.fn.curTmpVarIdx++ + } + return fmt.Sprintf("_p%d", p.tmpVarIdx) +} + +// BoolTmpVarCode returns source code for bool temp variable. +func (p *Param) BoolTmpVarCode() string { + const code = `var %s uint32 + if %s { + %s = 1 + } else { + %s = 0 + }` + tmp := p.tmpVar() + return fmt.Sprintf(code, tmp, p.Name, tmp, tmp) +} + +// SliceTmpVarCode returns source code for slice temp variable. +func (p *Param) SliceTmpVarCode() string { + const code = `var %s *%s + if len(%s) > 0 { + %s = &%s[0] + }` + tmp := p.tmpVar() + return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) +} + +// StringTmpVarCode returns source code for string temp variable. 
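To make the //sys grammar described above concrete, here is one prototype from this change together with, in condensed form, the wrapper the generator emits for it. The full generated output appears later in this diff as zvhd.go; errnoErr is the small helper the template places at the top of every generated file.

// Input prototype (from vhd.go in this change):
//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk

// Generated body: resolve the DLL and procedure lazily, make the raw syscall,
// and translate a non-zero return value (the [failretval != 0] condition)
// into the thread's last error.
var (
	modVirtDisk           = windows.NewLazySystemDLL("VirtDisk.dll")
	procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk")
)

func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags))
	if r1 != 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}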
+func (p *Param) StringTmpVarCode() string { + errvar := p.fn.Rets.ErrorVarName() + if errvar == "" { + errvar = "_" + } + tmp := p.tmpVar() + const code = `var %s %s + %s, %s = %s(%s)` + s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) + if errvar == "-" { + return s + } + const morecode = ` + if %s != nil { + return + }` + return s + fmt.Sprintf(morecode, errvar) +} + +// TmpVarCode returns source code for temp variable. +func (p *Param) TmpVarCode() string { + switch { + case p.Type == "bool": + return p.BoolTmpVarCode() + case strings.HasPrefix(p.Type, "[]"): + return p.SliceTmpVarCode() + default: + return "" + } +} + +// TmpVarHelperCode returns source code for helper's temp variable. +func (p *Param) TmpVarHelperCode() string { + if p.Type != "string" { + return "" + } + return p.StringTmpVarCode() +} + +// SyscallArgList returns source code fragments representing p parameter +// in syscall. Slices are translated into 2 syscall parameters: pointer to +// the first element and length. +func (p *Param) SyscallArgList() []string { + t := p.HelperType() + var s string + switch { + case t[0] == '*': + s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) + case t == "bool": + s = p.tmpVar() + case strings.HasPrefix(t, "[]"): + return []string{ + fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), + fmt.Sprintf("uintptr(len(%s))", p.Name), + } + default: + s = p.Name + } + return []string{fmt.Sprintf("uintptr(%s)", s)} +} + +// IsError determines if p parameter is used to return error. +func (p *Param) IsError() bool { + return p.Name == "err" && p.Type == "error" +} + +// HelperType returns type of parameter p used in helper function. +func (p *Param) HelperType() string { + if p.Type == "string" { + return p.fn.StrconvType() + } + return p.Type +} + +// join concatenates parameters ps into a string with sep separator. +// Each parameter is converted into string by applying fn to it +// before conversion. +func join(ps []*Param, fn func(*Param) string, sep string) string { + if len(ps) == 0 { + return "" + } + a := make([]string, 0) + for _, p := range ps { + a = append(a, fn(p)) + } + return strings.Join(a, sep) +} + +// Rets describes function return parameters. +type Rets struct { + Name string + Type string + ReturnsError bool + FailCond string +} + +// ErrorVarName returns error variable name for r. +func (r *Rets) ErrorVarName() string { + if r.ReturnsError { + return "err" + } + if r.Type == "error" { + return r.Name + } + return "" +} + +// ToParams converts r into slice of *Param. +func (r *Rets) ToParams() []*Param { + ps := make([]*Param, 0) + if len(r.Name) > 0 { + ps = append(ps, &Param{Name: r.Name, Type: r.Type}) + } + if r.ReturnsError { + ps = append(ps, &Param{Name: "err", Type: "error"}) + } + return ps +} + +// List returns source code of syscall return parameters. +func (r *Rets) List() string { + s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") + if len(s) > 0 { + s = "(" + s + ")" + } + return s +} + +// PrintList returns source code of trace printing part correspondent +// to syscall return values. +func (r *Rets) PrintList() string { + return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// SetReturnValuesCode returns source code that accepts syscall return values. 
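The string handling above is where the helper split (HelperName, HelperCallParamList) earns its keep: the public wrapper converts each Go string to a *uint16, which can fail, and only then calls an underscore-prefixed helper that performs the actual syscall. The generated openVirtualDisk / _openVirtualDisk pair later in this diff shows the pattern; trimmed to its essentials:

// Emitted by the "helperbody" template: convert string parameters first.
func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(path) // StringTmpVarCode
	if err != nil {
		return
	}
	return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle)
}

// Emitted by the "funcbody" template: the raw Syscall6 against the resolved
// procedure, identical in shape to the detachVirtualDisk body shown earlier.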
+func (r *Rets) SetReturnValuesCode() string { + if r.Name == "" && !r.ReturnsError { + return "" + } + retvar := "r0" + if r.Name == "" { + retvar = "r1" + } + errvar := "_" + if r.ReturnsError { + errvar = "e1" + } + return fmt.Sprintf("%s, _, %s := ", retvar, errvar) +} + +func (r *Rets) useLongHandleErrorCode(retvar string) string { + const code = `if %s { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = %sEINVAL + } + }` + cond := retvar + " == 0" + if r.FailCond != "" { + cond = strings.Replace(r.FailCond, "failretval", retvar, 1) + } + return fmt.Sprintf(code, cond, syscalldot()) +} + +// SetErrorCode returns source code that sets return parameters. +func (r *Rets) SetErrorCode() string { + const code = `if r0 != 0 { + %s = %sErrno(r0) + }` + if r.Name == "" && !r.ReturnsError { + return "" + } + if r.Name == "" { + return r.useLongHandleErrorCode("r1") + } + if r.Type == "error" { + return fmt.Sprintf(code, r.Name, syscalldot()) + } + s := "" + switch { + case r.Type[0] == '*': + s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) + case r.Type == "bool": + s = fmt.Sprintf("%s = r0 != 0", r.Name) + default: + s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) + } + if !r.ReturnsError { + return s + } + return s + "\n\t" + r.useLongHandleErrorCode(r.Name) +} + +// Fn describes syscall function. +type Fn struct { + Name string + Params []*Param + Rets *Rets + PrintTrace bool + dllname string + dllfuncname string + src string + // TODO: get rid of this field and just use parameter index instead + curTmpVarIdx int // insure tmp variables have uniq names +} + +// extractParams parses s to extract function parameters. +func extractParams(s string, f *Fn) ([]*Param, error) { + s = trim(s) + if s == "" { + return nil, nil + } + a := strings.Split(s, ",") + ps := make([]*Param, len(a)) + for i := range ps { + s2 := trim(a[i]) + b := strings.Split(s2, " ") + if len(b) != 2 { + b = strings.Split(s2, "\t") + if len(b) != 2 { + return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") + } + } + ps[i] = &Param{ + Name: trim(b[0]), + Type: trim(b[1]), + fn: f, + tmpVarIdx: -1, + } + } + return ps, nil +} + +// extractSection extracts text out of string s starting after start +// and ending just before end. found return value will indicate success, +// and prefix, body and suffix will contain correspondent parts of string s. +func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { + s = trim(s) + if strings.HasPrefix(s, string(start)) { + // no prefix + body = s[1:] + } else { + a := strings.SplitN(s, string(start), 2) + if len(a) != 2 { + return "", "", s, false + } + prefix = a[0] + body = a[1] + } + a := strings.SplitN(body, string(end), 2) + if len(a) != 2 { + return "", "", "", false + } + return prefix, a[0], a[1], true +} + +// newFn parses string s and return created function Fn. 
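Putting extractSection and extractParams together, newFn (below) carves a //sys line into a Fn. Taking the lookupPrivilegeName prototype from privilege.go earlier in this change, the decomposition works out roughly as follows (informal; field names as defined above):

//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//
//   Fn.Name             "lookupPrivilegeName"       prefix of the first '(' ... ')' section
//   Fn.Params           four *Param values          extractParams on that section's body
//   Rets.ReturnsError   true                        single "err error" return parameter
//   Rets.FailCond       ""                          no "[...]" section, so the default failretval==0 applies
//   Fn.dllname          "advapi32"                  text after "=", split on "."
//   Fn.dllfuncname      "LookupPrivilegeNameW"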
+func newFn(s string) (*Fn, error) { + s = trim(s) + f := &Fn{ + Rets: &Rets{}, + src: s, + PrintTrace: *printTraceFlag, + } + // function name and args + prefix, body, s, found := extractSection(s, '(', ')') + if !found || prefix == "" { + return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") + } + f.Name = prefix + var err error + f.Params, err = extractParams(body, f) + if err != nil { + return nil, err + } + // return values + _, body, s, found = extractSection(s, '(', ')') + if found { + r, err := extractParams(body, f) + if err != nil { + return nil, err + } + switch len(r) { + case 0: + case 1: + if r[0].IsError() { + f.Rets.ReturnsError = true + } else { + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + } + case 2: + if !r[1].IsError() { + return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") + } + f.Rets.ReturnsError = true + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + default: + return nil, errors.New("Too many return values in \"" + f.src + "\"") + } + } + // fail condition + _, body, s, found = extractSection(s, '[', ']') + if found { + f.Rets.FailCond = body + } + // dll and dll function names + s = trim(s) + if s == "" { + return f, nil + } + if !strings.HasPrefix(s, "=") { + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + s = trim(s[1:]) + a := strings.Split(s, ".") + switch len(a) { + case 1: + f.dllfuncname = a[0] + case 2: + f.dllname = a[0] + f.dllfuncname = a[1] + default: + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + return f, nil +} + +// DLLName returns DLL name for function f. +func (f *Fn) DLLName() string { + if f.dllname == "" { + return "kernel32" + } + return f.dllname +} + +// DLLName returns DLL function name for function f. +func (f *Fn) DLLFuncName() string { + if f.dllfuncname == "" { + return f.Name + } + return f.dllfuncname +} + +// ParamList returns source code for function f parameters. +func (f *Fn) ParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") +} + +// HelperParamList returns source code for helper function f parameters. +func (f *Fn) HelperParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") +} + +// ParamPrintList returns source code of trace printing part correspondent +// to syscall input parameters. +func (f *Fn) ParamPrintList() string { + return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// ParamCount return number of syscall parameters for function f. +func (f *Fn) ParamCount() int { + n := 0 + for _, p := range f.Params { + n += len(p.SyscallArgList()) + } + return n +} + +// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... +// to use. It returns parameter count for correspondent SyscallX function. +func (f *Fn) SyscallParamCount() int { + n := f.ParamCount() + switch { + case n <= 3: + return 3 + case n <= 6: + return 6 + case n <= 9: + return 9 + case n <= 12: + return 12 + case n <= 15: + return 15 + default: + panic("too many arguments to system call") + } +} + +// Syscall determines which SyscallX function to use for function f. 
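Because the runtime only provides Syscall, Syscall6, Syscall9, ... variants, SyscallParamCount rounds the flattened argument count up and SyscallParamList pads the remainder with zeros, while the real count is still passed as the second argument. For a hypothetical four-argument procedure procFoo, the emitted call would look like:

// 4 flattened arguments -> SyscallParamCount() == 6, so "Syscall6" is chosen
// and two zero placeholders are appended by SyscallParamList.
r1, _, e1 := syscall.Syscall6(procFoo.Addr(), 4,
	uintptr(a1), uintptr(a2), uintptr(a3), uintptr(a4),
	0, 0)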
+func (f *Fn) Syscall() string { + c := f.SyscallParamCount() + if c == 3 { + return syscalldot() + "Syscall" + } + return syscalldot() + "Syscall" + strconv.Itoa(c) +} + +// SyscallParamList returns source code for SyscallX parameters for function f. +func (f *Fn) SyscallParamList() string { + a := make([]string, 0) + for _, p := range f.Params { + a = append(a, p.SyscallArgList()...) + } + for len(a) < f.SyscallParamCount() { + a = append(a, "0") + } + return strings.Join(a, ", ") +} + +// HelperCallParamList returns source code of call into function f helper. +func (f *Fn) HelperCallParamList() string { + a := make([]string, 0, len(f.Params)) + for _, p := range f.Params { + s := p.Name + if p.Type == "string" { + s = p.tmpVar() + } + a = append(a, s) + } + return strings.Join(a, ", ") +} + +// IsUTF16 is true, if f is W (utf16) function. It is false +// for all A (ascii) functions. +func (f *Fn) IsUTF16() bool { + return true +} + +// StrconvFunc returns name of Go string to OS string function for f. +func (f *Fn) StrconvFunc() string { + if f.IsUTF16() { + return syscalldot() + "UTF16PtrFromString" + } + return syscalldot() + "BytePtrFromString" +} + +// StrconvType returns Go type name used for OS string for f. +func (f *Fn) StrconvType() string { + if f.IsUTF16() { + return "*uint16" + } + return "*byte" +} + +// HasStringParam is true, if f has at least one string parameter. +// Otherwise it is false. +func (f *Fn) HasStringParam() bool { + for _, p := range f.Params { + if p.Type == "string" { + return true + } + } + return false +} + +// HelperName returns name of function f helper. +func (f *Fn) HelperName() string { + if !f.HasStringParam() { + return f.Name + } + return "_" + f.Name +} + +// Source files and functions. +type Source struct { + Funcs []*Fn + Files []string + StdLibImports []string + ExternalImports []string +} + +func (src *Source) Import(pkg string) { + src.StdLibImports = append(src.StdLibImports, pkg) + sort.Strings(src.StdLibImports) +} + +func (src *Source) ExternalImport(pkg string) { + src.ExternalImports = append(src.ExternalImports, pkg) + sort.Strings(src.ExternalImports) +} + +// ParseFiles parses files listed in fs and extracts all syscall +// functions listed in sys comments. It returns source files +// and functions collection *Source if successful. +func ParseFiles(fs []string) (*Source, error) { + src := &Source{ + Funcs: make([]*Fn, 0), + Files: make([]string, 0), + StdLibImports: []string{ + "unsafe", + }, + ExternalImports: make([]string, 0), + } + for _, file := range fs { + if err := src.ParseFile(file); err != nil { + return nil, err + } + } + return src, nil +} + +// DLLs return dll names for a source set src. +func (src *Source) DLLs() []string { + uniq := make(map[string]bool) + r := make([]string, 0) + for _, f := range src.Funcs { + name := f.DLLName() + if _, found := uniq[name]; !found { + uniq[name] = true + r = append(r, name) + } + } + return r +} + +// ParseFile adds additional file path to a source set src. 
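In this vendor tree the generator is driven through go:generate rather than run by hand; vhd.go later in this diff carries the directive that produced zvhd.go. Assuming a Windows Go toolchain in the upstream go-winio repository, the workflow is approximately:

//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go

// Running "go generate ." in the package directory (or invoking the command
// above directly) re-parses the //sys lines in vhd.go and rewrites zvhd.go.
// With -output omitted, the formatted source is written to standard output,
// and -trace adds a print statement after every syscall for debugging.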
+func (src *Source) ParseFile(path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + s := bufio.NewScanner(file) + for s.Scan() { + t := trim(s.Text()) + if len(t) < 7 { + continue + } + if !strings.HasPrefix(t, "//sys") { + continue + } + t = t[5:] + if !(t[0] == ' ' || t[0] == '\t') { + continue + } + f, err := newFn(t[1:]) + if err != nil { + return err + } + src.Funcs = append(src.Funcs, f) + } + if err := s.Err(); err != nil { + return err + } + src.Files = append(src.Files, path) + + // get package name + fset := token.NewFileSet() + _, err = file.Seek(0, 0) + if err != nil { + return err + } + pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) + if err != nil { + return err + } + packageName = pkg.Name.Name + + return nil +} + +// IsStdRepo returns true if src is part of standard library. +func (src *Source) IsStdRepo() (bool, error) { + if len(src.Files) == 0 { + return false, errors.New("no input files provided") + } + abspath, err := filepath.Abs(src.Files[0]) + if err != nil { + return false, err + } + goroot := runtime.GOROOT() + if runtime.GOOS == "windows" { + abspath = strings.ToLower(abspath) + goroot = strings.ToLower(goroot) + } + sep := string(os.PathSeparator) + if !strings.HasSuffix(goroot, sep) { + goroot += sep + } + return strings.HasPrefix(abspath, goroot), nil +} + +// Generate output source file from a source set src. +func (src *Source) Generate(w io.Writer) error { + const ( + pkgStd = iota // any package in std library + pkgXSysWindows // x/sys/windows package + pkgOther + ) + isStdRepo, err := src.IsStdRepo() + if err != nil { + return err + } + var pkgtype int + switch { + case isStdRepo: + pkgtype = pkgStd + case packageName == "windows": + // TODO: this needs better logic than just using package name + pkgtype = pkgXSysWindows + default: + pkgtype = pkgOther + } + if *systemDLL { + switch pkgtype { + case pkgStd: + src.Import("internal/syscall/windows/sysdll") + case pkgXSysWindows: + default: + src.ExternalImport("golang.org/x/sys/windows") + } + } + if packageName != "syscall" { + src.Import("syscall") + } + funcMap := template.FuncMap{ + "packagename": packagename, + "syscalldot": syscalldot, + "newlazydll": func(dll string) string { + arg := "\"" + dll + ".dll\"" + if !*systemDLL { + return syscalldot() + "NewLazyDLL(" + arg + ")" + } + switch pkgtype { + case pkgStd: + return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" + case pkgXSysWindows: + return "NewLazySystemDLL(" + arg + ")" + default: + return "windows.NewLazySystemDLL(" + arg + ")" + } + }, + } + t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) + err = t.Execute(w, src) + if err != nil { + return errors.New("Failed to execute template: " + err.Error()) + } + return nil +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(1) +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + src, err := ParseFiles(flag.Args()) + if err != nil { + log.Fatal(err) + } + + var buf bytes.Buffer + if err := src.Generate(&buf); err != nil { + log.Fatal(err) + } + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + if *filename == "" { + _, err = os.Stdout.Write(data) + } else { + err = ioutil.WriteFile(*filename, data, 0644) + } + if err != nil { + log.Fatal(err) + } +} + +// TODO: use println 
instead to print in the following template +const srcTemplate = ` + +{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package {{packagename}} + +import ( +{{range .StdLibImports}}"{{.}}" +{{end}} + +{{range .ExternalImports}}"{{.}}" +{{end}} +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e {{syscalldot}}Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( +{{template "dlls" .}} +{{template "funcnames" .}}) +{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} +{{end}} + +{{/* help functions */}} + +{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +{{end}}{{end}} + +{{define "funcnames"}}{{range .Funcs}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}") +{{end}}{{end}} + +{{define "helperbody"}} +func {{.Name}}({{.ParamList}}) {{template "results" .}}{ +{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) +} +{{end}} + +{{define "funcbody"}} +func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ +{{template "tmpvars" .}} {{template "syscall" .}} +{{template "seterror" .}}{{template "printtrace" .}} return +} +{{end}} + +{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} +{{end}}{{end}}{{end}} + +{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} +{{end}}{{end}}{{end}} + +{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} + +{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} + +{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} +{{end}}{{end}} + +{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") +{{end}}{{end}} + +` diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 00000000..8fa90e91 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,108 @@ +// +build windows + +package vhd + +import "syscall" + +//go:generate go run mksyscall_windows.go -output zvhd.go vhd.go + +//sys createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) [failretval != 0] = VirtDisk.OpenVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) [failretval != 0] = VirtDisk.DetachVirtualDisk + +type virtualStorageType struct { + DeviceID uint32 + VendorID [16]byte +} + +const virtualDiskAccessNONE uint32 = 0 +const 
virtualDiskAccessATTACHRO uint32 = 65536 +const virtualDiskAccessATTACHRW uint32 = 131072 +const virtualDiskAccessDETACH uint32 = 262144 +const virtualDiskAccessGETINFO uint32 = 524288 +const virtualDiskAccessCREATE uint32 = 1048576 +const virtualDiskAccessMETAOPS uint32 = 2097152 +const virtualDiskAccessREAD uint32 = 851968 +const virtualDiskAccessALL uint32 = 4128768 +const virtualDiskAccessWRITABLE uint32 = 3276800 + +const createVirtualDiskFlagNone uint32 = 0 +const createVirtualDiskFlagFullPhysicalAllocation uint32 = 1 +const createVirtualDiskFlagPreventWritesToSourceDisk uint32 = 2 +const createVirtualDiskFlagDoNotCopyMetadataFromParent uint32 = 4 + +type version2 struct { + UniqueID [16]byte // GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType virtualStorageType + SourceVirtualStorageType virtualStorageType + ResiliencyGUID [16]byte // GUID +} + +type createVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 version2 +} + +// CreateVhdx will create a simple vhdx file at the given path using default values. +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + var defaultType virtualStorageType + + parameters := createVirtualDiskParameters{ + Version: 2, + Version2: version2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + var handle syscall.Handle + + if err := createVirtualDisk( + &defaultType, + path, + virtualDiskAccessNONE, + nil, + createVirtualDiskFlagNone, + 0, + ¶meters, + nil, + &handle); err != nil { + return err + } + + if err := syscall.CloseHandle(handle); err != nil { + return err + } + + return nil +} + +// DetachVhd detaches a VHD attached at the given path. +func DetachVhd(path string) error { + var ( + defaultType virtualStorageType + handle syscall.Handle + ) + + if err := openVirtualDisk( + &defaultType, + path, + virtualDiskAccessDETACH, + 0, + nil, + &handle); err != nil { + return err + } + defer syscall.CloseHandle(handle) + + if err := detachVirtualDisk(handle, 0, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go new file mode 100644 index 00000000..73f52596 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd.go @@ -0,0 +1,99 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modVirtDisk = windows.NewLazySystemDLL("VirtDisk.dll") + + procCreateVirtualDisk = modVirtDisk.NewProc("CreateVirtualDisk") + procOpenVirtualDisk = modVirtDisk.NewProc("OpenVirtualDisk") + procDetachVirtualDisk = modVirtDisk.NewProc("DetachVirtualDisk") +) + +func createVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, flags, providerSpecificFlags, parameters, o, handle) +} + +func _createVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, flags uint32, providerSpecificFlags uint32, parameters *createVirtualDiskParameters, o *syscall.Overlapped, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(flags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *uintptr, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func detachVirtualDisk(handle syscall.Handle, flags uint32, providerSpecificFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(flags), uintptr(providerSpecificFlags)) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/decompress.go b/vendor/github.com/Microsoft/go-winio/wim/decompress.go new file mode 100644 index 00000000..f4e67f84 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/decompress.go @@ -0,0 +1,138 @@ +package wim + +import ( + "encoding/binary" + "io" + "io/ioutil" + + "github.com/Microsoft/go-winio/wim/lzx" +) + +const chunkSize = 32768 // Compressed resource chunk size + +type compressedReader struct { + r *io.SectionReader + d io.ReadCloser + chunks []int64 + curChunk int + originalSize int64 +} + +func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) { + nchunks := (originalSize + chunkSize - 
1) / chunkSize + var base int64 + chunks := make([]int64, nchunks) + if originalSize <= 0xffffffff { + // 32-bit chunk offsets + base = (nchunks - 1) * 4 + chunks32 := make([]uint32, nchunks-1) + err := binary.Read(r, binary.LittleEndian, chunks32) + if err != nil { + return nil, err + } + for i, n := range chunks32 { + chunks[i+1] = int64(n) + } + + } else { + // 64-bit chunk offsets + base = (nchunks - 1) * 8 + err := binary.Read(r, binary.LittleEndian, chunks[1:]) + if err != nil { + return nil, err + } + } + + for i, c := range chunks { + chunks[i] = c + base + } + + cr := &compressedReader{ + r: r, + chunks: chunks, + originalSize: originalSize, + } + + err := cr.reset(int(offset / chunkSize)) + if err != nil { + return nil, err + } + + suboff := offset % chunkSize + if suboff != 0 { + _, err := io.CopyN(ioutil.Discard, cr.d, suboff) + if err != nil { + return nil, err + } + } + return cr, nil +} + +func (r *compressedReader) chunkOffset(n int) int64 { + if n == len(r.chunks) { + return r.r.Size() + } + return r.chunks[n] +} + +func (r *compressedReader) chunkSize(n int) int { + return int(r.chunkOffset(n+1) - r.chunkOffset(n)) +} + +func (r *compressedReader) uncompressedSize(n int) int { + if n < len(r.chunks)-1 { + return chunkSize + } + size := int(r.originalSize % chunkSize) + if size == 0 { + size = chunkSize + } + return size +} + +func (r *compressedReader) reset(n int) error { + if n >= len(r.chunks) { + return io.EOF + } + if r.d != nil { + r.d.Close() + } + r.curChunk = n + size := r.chunkSize(n) + uncompressedSize := r.uncompressedSize(n) + section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size)) + if size != uncompressedSize { + d, err := lzx.NewReader(section, uncompressedSize) + if err != nil { + return err + } + r.d = d + } else { + r.d = ioutil.NopCloser(section) + } + + return nil +} + +func (r *compressedReader) Read(b []byte) (int, error) { + for { + n, err := r.d.Read(b) + if err != io.EOF { + return n, err + } + + err = r.reset(r.curChunk + 1) + if err != nil { + return n, err + } + } +} + +func (r *compressedReader) Close() error { + var err error + if r.d != nil { + err = r.d.Close() + r.d = nil + } + return err +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go b/vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go new file mode 100644 index 00000000..4deb0df7 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/lzx/lzx.go @@ -0,0 +1,606 @@ +// Package lzx implements a decompressor for the the WIM variant of the +// LZX compression algorithm. +// +// The LZX algorithm is an earlier variant of LZX DELTA, which is documented +// at https://msdn.microsoft.com/en-us/library/cc483133(v=exchg.80).aspx. 
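The lzx package below exposes a single constructor, and decompress.go above hands it one compressed chunk at a time through reset(). A minimal, hedged sketch of that contract in isolation (compressedChunk stands in for one chunk read out of a WIM resource via the chunk table):

package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/Microsoft/go-winio/wim/lzx"
)

func main() {
	var compressedChunk []byte // one compressed chunk, obtained from the chunk table as above

	// uncompressedSize is chunkSize (32768) for every chunk except possibly
	// the last one of a resource, mirroring compressedReader.uncompressedSize.
	r, err := lzx.NewReader(bytes.NewReader(compressedChunk), 32768)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	chunk, err := ioutil.ReadAll(r) // yields the original, uncompressed chunk bytes
	if err != nil {
		log.Fatal(err)
	}
	_ = chunk
}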
+package lzx + +import ( + "bytes" + "encoding/binary" + "errors" + "io" +) + +const ( + maincodecount = 496 + maincodesplit = 256 + lencodecount = 249 + lenshift = 9 + codemask = 0x1ff + tablebits = 9 + tablesize = 1 << tablebits + + maxBlockSize = 32768 + windowSize = 32768 + + maxTreePathLen = 16 + + e8filesize = 12000000 + maxe8offset = 0x3fffffff + + verbatimBlock = 1 + alignedOffsetBlock = 2 + uncompressedBlock = 3 +) + +var footerBits = [...]byte{ + 0, 0, 0, 0, 1, 1, 2, 2, + 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, + 11, 11, 12, 12, 13, 13, 14, +} + +var basePosition = [...]uint16{ + 0, 1, 2, 3, 4, 6, 8, 12, + 16, 24, 32, 48, 64, 96, 128, 192, + 256, 384, 512, 768, 1024, 1536, 2048, 3072, + 4096, 6144, 8192, 12288, 16384, 24576, 32768, +} + +var ( + errCorrupt = errors.New("LZX data corrupt") +) + +// Reader is an interface used by the decompressor to access +// the input stream. If the provided io.Reader does not implement +// Reader, then a bufio.Reader is used. +type Reader interface { + io.Reader + io.ByteReader +} + +type decompressor struct { + r io.Reader + err error + unaligned bool + nbits byte + c uint32 + lru [3]uint16 + uncompressed int + windowReader *bytes.Reader + mainlens [maincodecount]byte + lenlens [lencodecount]byte + window [windowSize]byte + b []byte + bv int + bo int +} + +//go:noinline +func (f *decompressor) fail(err error) { + if f.err == nil { + f.err = err + } + f.bo = 0 + f.bv = 0 +} + +func (f *decompressor) ensureAtLeast(n int) error { + if f.bv-f.bo >= n { + return nil + } + + if f.err != nil { + return f.err + } + + if f.bv != f.bo { + copy(f.b[:f.bv-f.bo], f.b[f.bo:f.bv]) + } + n, err := io.ReadAtLeast(f.r, f.b[f.bv-f.bo:], n) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else { + f.fail(err) + } + return err + } + f.bv = f.bv - f.bo + n + f.bo = 0 + return nil +} + +// feed retrieves another 16-bit word from the stream and consumes +// it into f.c. It returns false if there are no more bytes available. +// Otherwise, on error, it sets f.err. +func (f *decompressor) feed() bool { + err := f.ensureAtLeast(2) + if err != nil { + if err == io.ErrUnexpectedEOF { + return false + } + } + f.c |= (uint32(f.b[f.bo+1])<<8 | uint32(f.b[f.bo])) << (16 - f.nbits) + f.nbits += 16 + f.bo += 2 + return true +} + +// getBits retrieves the next n bits from the byte stream. n +// must be <= 16. It sets f.err on error. +func (f *decompressor) getBits(n byte) uint16 { + if f.nbits < n { + if !f.feed() { + f.fail(io.ErrUnexpectedEOF) + } + } + c := uint16(f.c >> (32 - n)) + f.c <<= n + f.nbits -= n + return c +} + +type huffman struct { + extra [][]uint16 + maxbits byte + table [tablesize]uint16 +} + +// buildTable builds a huffman decoding table from a slice of code lengths, +// one per code, in order. Each code length must be <= maxTreePathLen. +// See https://en.wikipedia.org/wiki/Canonical_Huffman_code. +func buildTable(codelens []byte) *huffman { + // Determine the number of codes of each length, and the + // maximum length. + var count [maxTreePathLen + 1]uint + var max byte + for _, cl := range codelens { + count[cl]++ + if max < cl { + max = cl + } + } + + if max == 0 { + return &huffman{} + } + + // Determine the first code of each length. + var first [maxTreePathLen + 1]uint + code := uint(0) + for i := byte(1); i <= max; i++ { + code <<= 1 + first[i] = code + code += count[i] + } + + if code != 1< tablebits, split long codes into additional tables + // of suffixes of max-tablebits length. 
+ h := &huffman{maxbits: max} + if max > tablebits { + core := first[tablebits+1] / 2 // Number of codes that fit without extra tables + nextra := 1<> (cl - tablebits) + suffix := code & (1<<(cl-tablebits) - 1) + extendedCode := suffix << (max - cl) + for j := uint(0); j < 1<<(max-cl); j++ { + h.extra[h.table[prefix]][extendedCode+j] = v + } + } + } + } + + return h +} + +// getCode retrieves the next code using the provided +// huffman tree. It sets f.err on error. +func (f *decompressor) getCode(h *huffman) uint16 { + if h.maxbits > 0 { + if f.nbits < maxTreePathLen { + f.feed() + } + + // For codes with length < tablebits, it doesn't matter + // what the remainder of the bits used for table lookup + // are, since entries with all possible suffixes were + // added to the table. + c := h.table[f.c>>(32-tablebits)] + if c >= 1<>(32-(h.maxbits-tablebits))] + } + + n := byte(c >> lenshift) + if f.nbits >= n { + // Only consume the length of the code, not the maximum + // code length. + f.c <<= n + f.nbits -= n + return c & codemask + } + + f.fail(io.ErrUnexpectedEOF) + return 0 + } + + // This is an empty tree. It should not be used. + f.fail(errCorrupt) + return 0 +} + +// readTree updates the huffman tree path lengths in lens by +// reading and decoding lengths from the byte stream. lens +// should be prepopulated with the previous block's tree's path +// lengths. For the first block, lens should be zero. +func (f *decompressor) readTree(lens []byte) error { + // Get the pre-tree for the main tree. + var pretreeLen [20]byte + for i := range pretreeLen { + pretreeLen[i] = byte(f.getBits(4)) + } + if f.err != nil { + return f.err + } + h := buildTable(pretreeLen[:]) + + // The lengths are encoded as a series of huffman codes + // encoded by the pre-tree. + for i := 0; i < len(lens); { + c := byte(f.getCode(h)) + if f.err != nil { + return f.err + } + switch { + case c <= 16: // length is delta from previous length + lens[i] = (lens[i] + 17 - c) % 17 + i++ + case c == 17: // next n + 4 lengths are zero + zeroes := int(f.getBits(4)) + 4 + if i+zeroes > len(lens) { + return errCorrupt + } + for j := 0; j < zeroes; j++ { + lens[i+j] = 0 + } + i += zeroes + case c == 18: // next n + 20 lengths are zero + zeroes := int(f.getBits(5)) + 20 + if i+zeroes > len(lens) { + return errCorrupt + } + for j := 0; j < zeroes; j++ { + lens[i+j] = 0 + } + i += zeroes + case c == 19: // next n + 4 lengths all have the same value + same := int(f.getBits(1)) + 4 + if i+same > len(lens) { + return errCorrupt + } + c = byte(f.getCode(h)) + if c > 16 { + return errCorrupt + } + l := (lens[i] + 17 - c) % 17 + for j := 0; j < same; j++ { + lens[i+j] = l + } + i += same + default: + return errCorrupt + } + } + + if f.err != nil { + return f.err + } + return nil +} + +func (f *decompressor) readBlockHeader() (byte, uint16, error) { + // If the previous block was an unaligned uncompressed block, restore + // 2-byte alignment. + if f.unaligned { + err := f.ensureAtLeast(1) + if err != nil { + return 0, 0, err + } + f.bo++ + f.unaligned = false + } + + blockType := f.getBits(3) + full := f.getBits(1) + var blockSize uint16 + if full != 0 { + blockSize = maxBlockSize + } else { + blockSize = f.getBits(16) + if blockSize > maxBlockSize { + return 0, 0, errCorrupt + } + } + + if f.err != nil { + return 0, 0, f.err + } + + switch blockType { + case verbatimBlock, alignedOffsetBlock: + // The caller will read the huffman trees. 
+ case uncompressedBlock: + if f.nbits > 16 { + panic("impossible: more than one 16-bit word remains") + } + + // Drop the remaining bits in the current 16-bit word + // If there are no bits left, discard a full 16-bit word. + n := f.nbits + if n == 0 { + n = 16 + } + + f.getBits(n) + + // Read the LRU values for the next block. + err := f.ensureAtLeast(12) + if err != nil { + return 0, 0, err + } + + f.lru[0] = uint16(binary.LittleEndian.Uint32(f.b[f.bo : f.bo+4])) + f.lru[1] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+4 : f.bo+8])) + f.lru[2] = uint16(binary.LittleEndian.Uint32(f.b[f.bo+8 : f.bo+12])) + f.bo += 12 + + default: + return 0, 0, errCorrupt + } + + return byte(blockType), blockSize, nil +} + +// readTrees reads the two or three huffman trees for the current block. +// readAligned specifies whether to read the aligned offset tree. +func (f *decompressor) readTrees(readAligned bool) (main *huffman, length *huffman, aligned *huffman, err error) { + // Aligned offset blocks start with a small aligned offset tree. + if readAligned { + var alignedLen [8]byte + for i := range alignedLen { + alignedLen[i] = byte(f.getBits(3)) + } + aligned = buildTable(alignedLen[:]) + if aligned == nil { + err = errors.New("corrupt") + return + } + } + + // The main tree is encoded in two parts. + err = f.readTree(f.mainlens[:maincodesplit]) + if err != nil { + return + } + err = f.readTree(f.mainlens[maincodesplit:]) + if err != nil { + return + } + + main = buildTable(f.mainlens[:]) + if main == nil { + err = errors.New("corrupt") + return + } + + // The length tree is encoding in a single part. + err = f.readTree(f.lenlens[:]) + if err != nil { + return + } + + length = buildTable(f.lenlens[:]) + if length == nil { + err = errors.New("corrupt") + return + } + + err = f.err + return +} + +// readCompressedBlock decodes a compressed block, writing into the window +// starting at start and ending at end, and using the provided huffman trees. +func (f *decompressor) readCompressedBlock(start, end uint16, hmain, hlength, haligned *huffman) (int, error) { + i := start + for i < end { + main := f.getCode(hmain) + if f.err != nil { + break + } + if main < 256 { + // Literal byte. + f.window[i] = byte(main) + i++ + continue + } + + // This is a match backward in the window. Determine + // the offset and dlength. + matchlen := (main - 256) % 8 + slot := (main - 256) / 8 + + // The length is either the low bits of the code, + // or if this is 7, is encoded with the length tree. + if matchlen == 7 { + matchlen += f.getCode(hlength) + } + matchlen += 2 + + var matchoffset uint16 + if slot < 3 { + // The offset is one of the LRU values. + matchoffset = f.lru[slot] + f.lru[slot] = f.lru[0] + f.lru[0] = matchoffset + } else { + // The offset is encoded as a combination of the + // slot and more bits from the bit stream. + offsetbits := footerBits[slot] + var verbatimbits, alignedbits uint16 + if offsetbits > 0 { + if haligned != nil && offsetbits >= 3 { + // This is an aligned offset block. Combine + // the bits written verbatim with the aligned + // offset tree code. + verbatimbits = f.getBits(offsetbits-3) * 8 + alignedbits = f.getCode(haligned) + } else { + // There are no aligned offset bits to read, + // only verbatim bits. + verbatimbits = f.getBits(offsetbits) + alignedbits = 0 + } + } + matchoffset = basePosition[slot] + verbatimbits + alignedbits - 2 + // Update the LRU cache. 
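+			// (LZX keeps the three most recently used match offsets. Slots 0-2
+			// above reuse one of those entries directly; a freshly computed
+			// offset pushes out the oldest entry here.)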
+ f.lru[2] = f.lru[1] + f.lru[1] = f.lru[0] + f.lru[0] = matchoffset + } + + if matchoffset <= i && matchlen <= end-i { + copyend := i + matchlen + for ; i < copyend; i++ { + f.window[i] = f.window[i-matchoffset] + } + } else { + f.fail(errCorrupt) + break + } + } + return int(i - start), f.err +} + +// readBlock decodes the current block and returns the number of uncompressed bytes. +func (f *decompressor) readBlock(start uint16) (int, error) { + blockType, size, err := f.readBlockHeader() + if err != nil { + return 0, err + } + + if blockType == uncompressedBlock { + if size%2 == 1 { + // Remember to realign the byte stream at the next block. + f.unaligned = true + } + copied := 0 + if f.bo < f.bv { + copied = int(size) + s := int(start) + if copied > f.bv-f.bo { + copied = f.bv - f.bo + } + copy(f.window[s:s+copied], f.b[f.bo:f.bo+copied]) + f.bo += copied + } + n, err := io.ReadFull(f.r, f.window[start+uint16(copied):start+size]) + return copied + n, err + } + + hmain, hlength, haligned, err := f.readTrees(blockType == alignedOffsetBlock) + if err != nil { + return 0, err + } + + return f.readCompressedBlock(start, start+size, hmain, hlength, haligned) +} + +// decodeE8 reverses the 0xe8 x86 instruction encoding that was performed +// to the uncompressed data before it was compressed. +func decodeE8(b []byte, off int64) { + if off > maxe8offset || len(b) < 10 { + return + } + for i := 0; i < len(b)-10; i++ { + if b[i] == 0xe8 { + currentPtr := int32(off) + int32(i) + abs := int32(binary.LittleEndian.Uint32(b[i+1 : i+5])) + if abs >= -currentPtr && abs < e8filesize { + var rel int32 + if abs >= 0 { + rel = abs - currentPtr + } else { + rel = abs + e8filesize + } + binary.LittleEndian.PutUint32(b[i+1:i+5], uint32(rel)) + } + i += 4 + } + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + // Read and uncompress everything. + if f.windowReader == nil { + n := 0 + for n < f.uncompressed { + k, err := f.readBlock(uint16(n)) + if err != nil { + return 0, err + } + n += k + } + decodeE8(f.window[:f.uncompressed], 0) + f.windowReader = bytes.NewReader(f.window[:f.uncompressed]) + } + + // Just read directly from the window. + return f.windowReader.Read(b) +} + +func (f *decompressor) Close() error { + return nil +} + +// NewReader returns a new io.ReadCloser that decompresses a +// WIM LZX stream until uncompressedSize bytes have been returned. 
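+//
+// A minimal usage sketch, with chunk and chunkSize as placeholder names for one
+// compressed chunk and its uncompressed size (at most windowSize bytes):
+//
+//	rc, err := NewReader(bytes.NewReader(chunk), chunkSize)
+//	if err != nil {
+//		// handle err
+//	}
+//	defer rc.Close()
+//	data, err := ioutil.ReadAll(rc)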
+func NewReader(r io.Reader, uncompressedSize int) (io.ReadCloser, error) { + if uncompressedSize > windowSize { + return nil, errors.New("uncompressed size is limited to 32KB") + } + f := &decompressor{ + lru: [3]uint16{1, 1, 1}, + uncompressed: uncompressedSize, + b: make([]byte, 4096), + r: r, + } + return f, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/validate/validate.go b/vendor/github.com/Microsoft/go-winio/wim/validate/validate.go new file mode 100644 index 00000000..ba03fc9a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/validate/validate.go @@ -0,0 +1,51 @@ +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/Microsoft/go-winio/wim" +) + +func main() { + flag.Parse() + f, err := os.Open(flag.Arg(0)) + if err != nil { + panic(err) + } + + w, err := wim.NewReader(f) + if err != nil { + panic(err) + + } + + fmt.Printf("%#v\n%#v\n", w.Image[0], w.Image[0].Windows) + + dir, err := w.Image[0].Open() + if err != nil { + panic(err) + } + + err = recur(dir) + if err != nil { + panic(err) + } +} + +func recur(d *wim.File) error { + files, err := d.Readdir() + if err != nil { + return fmt.Errorf("%s: %s", d.Name, err) + } + for _, f := range files { + if f.IsDir() { + err = recur(f) + if err != nil { + return fmt.Errorf("%s: %s", f.Name, err) + } + } + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/wim/wim.go b/vendor/github.com/Microsoft/go-winio/wim/wim.go new file mode 100644 index 00000000..1d02e920 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/wim/wim.go @@ -0,0 +1,866 @@ +// Package wim implements a WIM file parser. +// +// WIM files are used to distribute Windows file system and container images. +// They are documented at https://msdn.microsoft.com/en-us/library/windows/desktop/dd861280.aspx. +package wim + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "strconv" + "sync" + "time" + "unicode/utf16" +) + +// File attribute constants from Windows. +const ( + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_EA = 0x00040000 +) + +// Windows processor architectures. 
+const ( + PROCESSOR_ARCHITECTURE_INTEL = 0 + PROCESSOR_ARCHITECTURE_MIPS = 1 + PROCESSOR_ARCHITECTURE_ALPHA = 2 + PROCESSOR_ARCHITECTURE_PPC = 3 + PROCESSOR_ARCHITECTURE_SHX = 4 + PROCESSOR_ARCHITECTURE_ARM = 5 + PROCESSOR_ARCHITECTURE_IA64 = 6 + PROCESSOR_ARCHITECTURE_ALPHA64 = 7 + PROCESSOR_ARCHITECTURE_MSIL = 8 + PROCESSOR_ARCHITECTURE_AMD64 = 9 + PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10 + PROCESSOR_ARCHITECTURE_NEUTRAL = 11 + PROCESSOR_ARCHITECTURE_ARM64 = 12 +) + +var wimImageTag = [...]byte{'M', 'S', 'W', 'I', 'M', 0, 0, 0} + +type guid struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +func (g guid) String() string { + return fmt.Sprintf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7]) +} + +type resourceDescriptor struct { + FlagsAndCompressedSize uint64 + Offset int64 + OriginalSize int64 +} + +type resFlag byte + +const ( + resFlagFree resFlag = 1 << iota + resFlagMetadata + resFlagCompressed + resFlagSpanned +) + +const validate = false + +const supportedResFlags = resFlagMetadata | resFlagCompressed + +func (r *resourceDescriptor) Flags() resFlag { + return resFlag(r.FlagsAndCompressedSize >> 56) +} + +func (r *resourceDescriptor) CompressedSize() int64 { + return int64(r.FlagsAndCompressedSize & 0xffffffffffffff) +} + +func (r *resourceDescriptor) String() string { + s := fmt.Sprintf("%d bytes at %d", r.CompressedSize(), r.Offset) + if r.Flags()&4 != 0 { + s += fmt.Sprintf(" (uncompresses to %d)", r.OriginalSize) + } + return s +} + +// SHA1Hash contains the SHA1 hash of a file or stream. +type SHA1Hash [20]byte + +type streamDescriptor struct { + resourceDescriptor + PartNumber uint16 + RefCount uint32 + Hash SHA1Hash +} + +type hdrFlag uint32 + +const ( + hdrFlagReserved hdrFlag = 1 << iota + hdrFlagCompressed + hdrFlagReadOnly + hdrFlagSpanned + hdrFlagResourceOnly + hdrFlagMetadataOnly + hdrFlagWriteInProgress + hdrFlagRpFix +) + +const ( + hdrFlagCompressReserved hdrFlag = 1 << (iota + 16) + hdrFlagCompressXpress + hdrFlagCompressLzx +) + +const supportedHdrFlags = hdrFlagRpFix | hdrFlagReadOnly | hdrFlagCompressed | hdrFlagCompressLzx + +type wimHeader struct { + ImageTag [8]byte + Size uint32 + Version uint32 + Flags hdrFlag + CompressionSize uint32 + WIMGuid guid + PartNumber uint16 + TotalParts uint16 + ImageCount uint32 + OffsetTable resourceDescriptor + XMLData resourceDescriptor + BootMetadata resourceDescriptor + BootIndex uint32 + Padding uint32 + Integrity resourceDescriptor + Unused [60]byte +} + +type securityblockDisk struct { + TotalLength uint32 + NumEntries uint32 +} + +const securityblockDiskSize = 8 + +type direntry struct { + Attributes uint32 + SecurityID uint32 + SubdirOffset int64 + Unused1, Unused2 int64 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + Hash SHA1Hash + Padding uint32 + ReparseHardLink int64 + StreamCount uint16 + ShortNameLength uint16 + FileNameLength uint16 +} + +var direntrySize = int64(binary.Size(direntry{}) + 8) // includes an 8-byte length prefix + +type streamentry struct { + Unused int64 + Hash SHA1Hash + NameLength int16 +} + +var streamentrySize = int64(binary.Size(streamentry{}) + 8) // includes an 8-byte length prefix + +// Filetime represents a Windows time. +type Filetime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Time returns the time as time.Time. 
+func (ft *Filetime) Time() time.Time { + // 100-nanosecond intervals since January 1, 1601 + nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) + // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) + nsec -= 116444736000000000 + // convert into nanoseconds + nsec *= 100 + return time.Unix(0, nsec) +} + +// UnmarshalXML unmarshals the time from a WIM XML blob. +func (ft *Filetime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type time struct { + Low string `xml:"LOWPART"` + High string `xml:"HIGHPART"` + } + var t time + err := d.DecodeElement(&t, &start) + if err != nil { + return err + } + + low, err := strconv.ParseUint(t.Low, 0, 32) + if err != nil { + return err + } + high, err := strconv.ParseUint(t.High, 0, 32) + if err != nil { + return err + } + + ft.LowDateTime = uint32(low) + ft.HighDateTime = uint32(high) + return nil +} + +type info struct { + Image []ImageInfo `xml:"IMAGE"` +} + +// ImageInfo contains information about the image. +type ImageInfo struct { + Name string `xml:"NAME"` + Index int `xml:"INDEX,attr"` + CreationTime Filetime `xml:"CREATIONTIME"` + ModTime Filetime `xml:"LASTMODIFICATIONTIME"` + Windows *WindowsInfo `xml:"WINDOWS"` +} + +// WindowsInfo contains information about the Windows installation in the image. +type WindowsInfo struct { + Arch byte `xml:"ARCH"` + ProductName string `xml:"PRODUCTNAME"` + EditionID string `xml:"EDITIONID"` + InstallationType string `xml:"INSTALLATIONTYPE"` + ProductType string `xml:"PRODUCTTYPE"` + Languages []string `xml:"LANGUAGES>LANGUAGE"` + DefaultLanguage string `xml:"LANGUAGES>DEFAULT"` + Version Version `xml:"VERSION"` + SystemRoot string `xml:"SYSTEMROOT"` +} + +// Version represents a Windows build version. +type Version struct { + Major int `xml:"MAJOR"` + Minor int `xml:"MINOR"` + Build int `xml:"BUILD"` + SPBuild int `xml:"SPBUILD"` + SPLevel int `xml:"SPLEVEL"` +} + +// ParseError is returned when the WIM cannot be parsed. +type ParseError struct { + Oper string + Path string + Err error +} + +func (e *ParseError) Error() string { + if e.Path == "" { + return "WIM parse error at " + e.Oper + ": " + e.Err.Error() + } + return fmt.Sprintf("WIM parse error: %s %s: %s", e.Oper, e.Path, e.Err.Error()) +} + +// Reader provides functions to read a WIM file. +type Reader struct { + hdr wimHeader + r io.ReaderAt + fileData map[SHA1Hash]resourceDescriptor + + XMLInfo string // The XML information about the WIM. + Image []*Image // The WIM's images. +} + +// Image represents an image within a WIM file. +type Image struct { + wim *Reader + offset resourceDescriptor + sds [][]byte + rootOffset int64 + r io.ReadCloser + curOffset int64 + m sync.Mutex + + ImageInfo +} + +// StreamHeader contains alternate data stream metadata. +type StreamHeader struct { + Name string + Hash SHA1Hash + Size int64 +} + +// Stream represents an alternate data stream or reparse point data stream. +type Stream struct { + StreamHeader + wim *Reader + offset resourceDescriptor +} + +// FileHeader contains file metadata. +type FileHeader struct { + Name string + ShortName string + Attributes uint32 + SecurityDescriptor []byte + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + Hash SHA1Hash + Size int64 + LinkID int64 + ReparseTag uint32 + ReparseReserved uint32 +} + +// File represents a file or directory in a WIM image. 
+type File struct { + FileHeader + Streams []*Stream + offset resourceDescriptor + img *Image + subdirOffset int64 +} + +// NewReader returns a Reader that can be used to read WIM file data. +func NewReader(f io.ReaderAt) (*Reader, error) { + r := &Reader{r: f} + section := io.NewSectionReader(f, 0, 0xffff) + err := binary.Read(section, binary.LittleEndian, &r.hdr) + if err != nil { + return nil, err + } + + if r.hdr.ImageTag != wimImageTag { + return nil, &ParseError{Oper: "image tag", Err: errors.New("not a WIM file")} + } + + if r.hdr.Flags&^supportedHdrFlags != 0 { + return nil, fmt.Errorf("unsupported WIM flags %x", r.hdr.Flags&^supportedHdrFlags) + } + + if r.hdr.CompressionSize != 0x8000 { + return nil, fmt.Errorf("unsupported compression size %d", r.hdr.CompressionSize) + } + + if r.hdr.TotalParts != 1 { + return nil, errors.New("multi-part WIM not supported") + } + + fileData, images, err := r.readOffsetTable(&r.hdr.OffsetTable) + if err != nil { + return nil, err + } + + xmlinfo, err := r.readXML() + if err != nil { + return nil, err + } + + var info info + err = xml.Unmarshal([]byte(xmlinfo), &info) + if err != nil { + return nil, &ParseError{Oper: "XML info", Err: err} + } + + for i, img := range images { + for _, imgInfo := range info.Image { + if imgInfo.Index == i+1 { + img.ImageInfo = imgInfo + break + } + } + } + + r.fileData = fileData + r.Image = images + r.XMLInfo = xmlinfo + return r, nil +} + +// Close releases resources associated with the Reader. +func (r *Reader) Close() error { + for _, img := range r.Image { + img.reset() + } + return nil +} + +func (r *Reader) resourceReader(hdr *resourceDescriptor) (io.ReadCloser, error) { + return r.resourceReaderWithOffset(hdr, 0) +} + +func (r *Reader) resourceReaderWithOffset(hdr *resourceDescriptor, offset int64) (io.ReadCloser, error) { + var sr io.ReadCloser + section := io.NewSectionReader(r.r, hdr.Offset, hdr.CompressedSize()) + if hdr.Flags()&resFlagCompressed == 0 { + section.Seek(offset, 0) + sr = ioutil.NopCloser(section) + } else { + cr, err := newCompressedReader(section, hdr.OriginalSize, offset) + if err != nil { + return nil, err + } + sr = cr + } + + return sr, nil +} + +func (r *Reader) readResource(hdr *resourceDescriptor) ([]byte, error) { + rsrc, err := r.resourceReader(hdr) + if err != nil { + return nil, err + } + defer rsrc.Close() + return ioutil.ReadAll(rsrc) +} + +func (r *Reader) readXML() (string, error) { + if r.hdr.XMLData.CompressedSize() == 0 { + return "", nil + } + rsrc, err := r.resourceReader(&r.hdr.XMLData) + if err != nil { + return "", err + } + defer rsrc.Close() + + XMLData := make([]uint16, r.hdr.XMLData.OriginalSize/2) + err = binary.Read(rsrc, binary.LittleEndian, XMLData) + if err != nil { + return "", &ParseError{Oper: "XML data", Err: err} + } + + // The BOM will always indicate little-endian UTF-16. 
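+	// (0xfeff is the Unicode byte order mark: a genuinely little-endian UTF-16
+	// stream decodes its first code unit as 0xfeff, while byte-swapped data
+	// would decode as 0xfffe.)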
+ if XMLData[0] != 0xfeff { + return "", &ParseError{Oper: "XML data", Err: errors.New("invalid BOM")} + } + return string(utf16.Decode(XMLData[1:])), nil +} + +func (r *Reader) readOffsetTable(res *resourceDescriptor) (map[SHA1Hash]resourceDescriptor, []*Image, error) { + fileData := make(map[SHA1Hash]resourceDescriptor) + var images []*Image + + offsetTable, err := r.readResource(res) + if err != nil { + return nil, nil, &ParseError{Oper: "offset table", Err: err} + } + + br := bytes.NewReader(offsetTable) + for i := 0; ; i++ { + var res streamDescriptor + err := binary.Read(br, binary.LittleEndian, &res) + if err == io.EOF { + break + } + if err != nil { + return nil, nil, &ParseError{Oper: "offset table", Err: err} + } + if res.Flags()&^supportedResFlags != 0 { + return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("unsupported resource flag")} + } + + // Validation for ad-hoc testing + if validate { + sec, err := r.resourceReader(&res.resourceDescriptor) + if err != nil { + panic(fmt.Sprint(i, err)) + } + hash := sha1.New() + _, err = io.Copy(hash, sec) + sec.Close() + if err != nil { + panic(fmt.Sprint(i, err)) + } + var cmphash SHA1Hash + copy(cmphash[:], hash.Sum(nil)) + if cmphash != res.Hash { + panic(fmt.Sprint(i, "hash mismatch")) + } + } + + if res.Flags()&resFlagMetadata != 0 { + image := &Image{ + wim: r, + offset: res.resourceDescriptor, + } + images = append(images, image) + } else { + fileData[res.Hash] = res.resourceDescriptor + } + } + + if len(images) != int(r.hdr.ImageCount) { + return nil, nil, &ParseError{Oper: "offset table", Err: errors.New("mismatched image count")} + } + + return fileData, images, nil +} + +func (r *Reader) readSecurityDescriptors(rsrc io.Reader) (sds [][]byte, n int64, err error) { + var secBlock securityblockDisk + err = binary.Read(rsrc, binary.LittleEndian, &secBlock) + if err != nil { + err = &ParseError{Oper: "security table", Err: err} + return + } + + n += securityblockDiskSize + + secSizes := make([]int64, secBlock.NumEntries) + err = binary.Read(rsrc, binary.LittleEndian, &secSizes) + if err != nil { + err = &ParseError{Oper: "security table sizes", Err: err} + return + } + + n += int64(secBlock.NumEntries * 8) + + sds = make([][]byte, secBlock.NumEntries) + for i, size := range secSizes { + sd := make([]byte, size&0xffffffff) + _, err = io.ReadFull(rsrc, sd) + if err != nil { + err = &ParseError{Oper: "security descriptor", Err: err} + return + } + n += int64(len(sd)) + sds[i] = sd + } + + secsize := int64((secBlock.TotalLength + 7) &^ 7) + if n > secsize { + err = &ParseError{Oper: "security descriptor", Err: errors.New("security descriptor table too small")} + return + } + + _, err = io.CopyN(ioutil.Discard, rsrc, secsize-n) + if err != nil { + return + } + + n = secsize + return +} + +// Open parses the image and returns the root directory. 
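+//
+// The typical call sequence, sketched with placeholder names (f is an
+// io.ReaderAt over the .wim file, for example an *os.File):
+//
+//	w, err := NewReader(f)
+//	img := w.Image[0]
+//	root, err := img.Open()
+//	entries, err := root.Readdir()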
+func (img *Image) Open() (*File, error) { + if img.sds == nil { + rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, img.rootOffset) + if err != nil { + return nil, err + } + sds, n, err := img.wim.readSecurityDescriptors(rsrc) + if err != nil { + rsrc.Close() + return nil, err + } + img.sds = sds + img.r = rsrc + img.rootOffset = n + img.curOffset = n + } + + f, err := img.readdir(img.rootOffset) + if err != nil { + return nil, err + } + if len(f) != 1 { + return nil, &ParseError{Oper: "root directory", Err: errors.New("expected exactly 1 root directory entry")} + } + return f[0], err +} + +func (img *Image) reset() { + if img.r != nil { + img.r.Close() + img.r = nil + } + img.curOffset = -1 +} + +func (img *Image) readdir(offset int64) ([]*File, error) { + img.m.Lock() + defer img.m.Unlock() + + if offset < img.curOffset || offset > img.curOffset+chunkSize { + // Reset to seek backward or to seek forward very far. + img.reset() + } + if img.r == nil { + rsrc, err := img.wim.resourceReaderWithOffset(&img.offset, offset) + if err != nil { + return nil, err + } + img.r = rsrc + img.curOffset = offset + } + if offset > img.curOffset { + _, err := io.CopyN(ioutil.Discard, img.r, offset-img.curOffset) + if err != nil { + img.reset() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + } + + var entries []*File + for { + e, n, err := img.readNextEntry(img.r) + img.curOffset += n + if err == io.EOF { + break + } + if err != nil { + img.reset() + return nil, err + } + entries = append(entries, e) + } + return entries, nil +} + +func (img *Image) readNextEntry(r io.Reader) (*File, int64, error) { + var length int64 + err := binary.Read(r, binary.LittleEndian, &length) + if err != nil { + return nil, 0, &ParseError{Oper: "directory length check", Err: err} + } + + if length == 0 { + return nil, 8, io.EOF + } + + left := length + if left < direntrySize { + return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short")} + } + + var dentry direntry + err = binary.Read(r, binary.LittleEndian, &dentry) + if err != nil { + return nil, 0, &ParseError{Oper: "directory entry", Err: err} + } + + left -= direntrySize + + namesLen := int64(dentry.FileNameLength + 2 + dentry.ShortNameLength) + if left < namesLen { + return nil, 0, &ParseError{Oper: "directory entry", Err: errors.New("size too short for names")} + } + + names := make([]uint16, namesLen/2) + err = binary.Read(r, binary.LittleEndian, names) + if err != nil { + return nil, 0, &ParseError{Oper: "file name", Err: err} + } + + left -= namesLen + + var name, shortName string + if dentry.FileNameLength > 0 { + name = string(utf16.Decode(names[:dentry.FileNameLength/2])) + } + + if dentry.ShortNameLength > 0 { + shortName = string(utf16.Decode(names[dentry.FileNameLength/2+1:])) + } + + var offset resourceDescriptor + zerohash := SHA1Hash{} + if dentry.Hash != zerohash { + var ok bool + offset, ok = img.wim.fileData[dentry.Hash] + if !ok { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %#v", dentry)} + } + } + + f := &File{ + FileHeader: FileHeader{ + Attributes: dentry.Attributes, + CreationTime: dentry.CreationTime, + LastAccessTime: dentry.LastAccessTime, + LastWriteTime: dentry.LastWriteTime, + Hash: dentry.Hash, + Size: offset.OriginalSize, + Name: name, + ShortName: shortName, + }, + + offset: offset, + img: img, + subdirOffset: dentry.SubdirOffset, + } + + isDir := false + + if 
dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT == 0 { + f.LinkID = dentry.ReparseHardLink + if dentry.Attributes&FILE_ATTRIBUTE_DIRECTORY != 0 { + isDir = true + } + } else { + f.ReparseTag = uint32(dentry.ReparseHardLink) + f.ReparseReserved = uint32(dentry.ReparseHardLink >> 32) + } + + if isDir && f.subdirOffset == 0 { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("no subdirectory data for directory")} + } else if !isDir && f.subdirOffset != 0 { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("unexpected subdirectory data for non-directory")} + } + + if dentry.SecurityID != 0xffffffff { + f.SecurityDescriptor = img.sds[dentry.SecurityID] + } + + _, err = io.CopyN(ioutil.Discard, r, left) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, 0, err + } + + if dentry.StreamCount > 0 { + var streams []*Stream + for i := uint16(0); i < dentry.StreamCount; i++ { + s, n, err := img.readNextStream(r) + length += n + if err != nil { + return nil, 0, err + } + // The first unnamed stream should be treated as the file stream. + if i == 0 && s.Name == "" { + f.Hash = s.Hash + f.Size = s.Size + f.offset = s.offset + } else if s.Name != "" { + streams = append(streams, s) + } + } + f.Streams = streams + } + + if dentry.Attributes&FILE_ATTRIBUTE_REPARSE_POINT != 0 && f.Size == 0 { + return nil, 0, &ParseError{Oper: "directory entry", Path: name, Err: errors.New("reparse point is missing reparse stream")} + } + + return f, length, nil +} + +func (img *Image) readNextStream(r io.Reader) (*Stream, int64, error) { + var length int64 + err := binary.Read(r, binary.LittleEndian, &length) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, 0, &ParseError{Oper: "stream length check", Err: err} + } + + left := length + if left < streamentrySize { + return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short")} + } + + var sentry streamentry + err = binary.Read(r, binary.LittleEndian, &sentry) + if err != nil { + return nil, 0, &ParseError{Oper: "stream entry", Err: err} + } + + left -= streamentrySize + + if left < int64(sentry.NameLength) { + return nil, 0, &ParseError{Oper: "stream entry", Err: errors.New("size too short for name")} + } + + names := make([]uint16, sentry.NameLength/2) + err = binary.Read(r, binary.LittleEndian, names) + if err != nil { + return nil, 0, &ParseError{Oper: "file name", Err: err} + } + + left -= int64(sentry.NameLength) + name := string(utf16.Decode(names)) + + var offset resourceDescriptor + if sentry.Hash != (SHA1Hash{}) { + var ok bool + offset, ok = img.wim.fileData[sentry.Hash] + if !ok { + return nil, 0, &ParseError{Oper: "stream entry", Path: name, Err: fmt.Errorf("could not find file data matching hash %v", sentry.Hash)} + } + } + + s := &Stream{ + StreamHeader: StreamHeader{ + Hash: sentry.Hash, + Size: offset.OriginalSize, + Name: name, + }, + wim: img.wim, + offset: offset, + } + + _, err = io.CopyN(ioutil.Discard, r, left) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, 0, err + } + + return s, length, nil +} + +// Open returns an io.ReadCloser that can be used to read the stream's contents. +func (s *Stream) Open() (io.ReadCloser, error) { + return s.wim.resourceReader(&s.offset) +} + +// Open returns an io.ReadCloser that can be used to read the file's contents. 
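+// Each call opens an independent reader over the file's backing resource; when
+// the resource is stored LZX-compressed, the underlying resource reader
+// decompresses it transparently, so callers always read plain file contents.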
+func (f *File) Open() (io.ReadCloser, error) { + return f.img.wim.resourceReader(&f.offset) +} + +// Readdir reads the directory entries. +func (f *File) Readdir() ([]*File, error) { + if !f.IsDir() { + return nil, errors.New("not a directory") + } + return f.img.readdir(f.subdirOffset) +} + +// IsDir returns whether the given file is a directory. It returns false when it +// is a directory reparse point. +func (f *FileHeader) IsDir() bool { + return f.Attributes&(FILE_ATTRIBUTE_DIRECTORY|FILE_ATTRIBUTE_REPARSE_POINT) == FILE_ATTRIBUTE_DIRECTORY +} diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 00000000..3f527639 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,520 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = 
modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, 
createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name string, timeout uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _waitNamedPipe(_p0, timeout) +} + +func _waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func 
convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := 
syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } 
+ r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore new file mode 100644 index 00000000..b883f1fd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/vendor/github.com/Microsoft/hcsshim/.gometalinter.json b/vendor/github.com/Microsoft/hcsshim/.gometalinter.json new file mode 100644 index 00000000..00e9a6e2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gometalinter.json @@ -0,0 +1,17 @@ +{ + "Vendor": true, + "Deadline": "2m", + "Sort": [ + "linter", + "severity", + "path", + "line" + ], + "Skip": [ + "internal\\schema2" + ], + "EnableGC": true, + "Enable": [ + "gofmt" + ] +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE new file mode 100644 index 00000000..49d21669 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md new file mode 100644 index 00000000..15b39181 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -0,0 +1,41 @@ +# hcsshim + +[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master) + +This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). + +It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well. + +## Contributing + +This project welcomes contributions and suggestions. 
Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Dependencies + +This project requires Golang 1.9 or newer to build. + +For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements). + +## Reporting Security Issues + +Security issues and bugs should be reported privately, via email, to the Microsoft Security +Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should +receive a response within 24 hours. If for some reason you do not, please follow up via +email to ensure we received your original message. Further information, including the +[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in +the [Security TechCenter](https://technet.microsoft.com/en-us/security/default). + +For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet + +--------------- +Copyright (c) 2018 Microsoft Corp. All rights reserved. diff --git a/vendor/github.com/Microsoft/hcsshim/appveyor.yml b/vendor/github.com/Microsoft/hcsshim/appveyor.yml new file mode 100644 index 00000000..a8ec5a59 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/appveyor.yml @@ -0,0 +1,29 @@ +version: 0.1.{build} + +image: Visual Studio 2017 + +clone_folder: c:\gopath\src\github.com\Microsoft\hcsshim + +environment: + GOPATH: c:\gopath + PATH: C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin;%GOPATH%\bin;C:\gometalinter-2.0.12-windows-amd64;%PATH% + +stack: go 1.11 + +build_script: + - appveyor DownloadFile https://github.com/alecthomas/gometalinter/releases/download/v2.0.12/gometalinter-2.0.12-windows-amd64.zip + - 7z x gometalinter-2.0.12-windows-amd64.zip -y -oC:\ > NUL + - gometalinter.exe --config .gometalinter.json ./... + - go build ./cmd/wclayer + - go build ./cmd/runhcs + - go build ./cmd/tar2ext4 + - go test -v ./... -tags admin + - go test -c ./test/functional/ -tags functional + - go test -c ./test/runhcs/ -tags integration + +artifacts: + - path: 'wclayer.exe' + - path: 'runhcs.exe' + - path: 'tar2ext4.exe' + - path: 'functional.test.exe' + - path: 'runhcs.test.exe' \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/LICENSE b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE new file mode 100644 index 00000000..e40e4420 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE @@ -0,0 +1,22 @@ +runhcs is a fork of runc. + +The following is runc's legal notice. + +--- + +runc + +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. 
+It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/container.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/container.go new file mode 100644 index 00000000..2a9ccd57 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/container.go @@ -0,0 +1,848 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + winio "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/cni" + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcsoci" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/regstate" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var errContainerStopped = errors.New("container is stopped") + +type persistedState struct { + // ID is the id of this container/UVM. + ID string `json:",omitempty"` + // Owner is the owner value passed into the runhcs command and may be `""`. + Owner string `json:",omitempty"` + // SandboxID is the sandbox identifer passed in via OCI specifications. This + // can either be the sandbox itself or the sandbox this container should run + // in. See `parseSandboxAnnotations`. + SandboxID string `json:",omitempty"` + // HostID will be VM ID hosting this container. If a sandbox is used it will + // match the `SandboxID`. + HostID string `json:",omitempty"` + // Bundle is the folder path on disk where the container state and spec files + // reside. + Bundle string `json:",omitempty"` + Created time.Time `json:",omitempty"` + Rootfs string `json:",omitempty"` + // Spec is the in memory deserialized values found on `Bundle\config.json`. + Spec *specs.Spec `json:",omitempty"` + RequestedNetNS string `json:",omitempty"` + // IsHost is `true` when this is a VM isolated config. + IsHost bool `json:",omitempty"` + // UniqueID is a unique ID generated per container config. + UniqueID guid.GUID `json:",omitempty"` + // HostUniqueID is the unique ID of the hosting VM if this container is + // hosted. + HostUniqueID guid.GUID `json:",omitempty"` +} + +type containerStatus string + +const ( + containerRunning containerStatus = "running" + containerStopped containerStatus = "stopped" + containerCreated containerStatus = "created" + containerPaused containerStatus = "paused" + containerUnknown containerStatus = "unknown" + + keyState = "state" + keyResources = "resources" + keyShimPid = "shim" + keyInitPid = "pid" + keyNetNS = "netns" + // keyPidMapFmt is the format to use when mapping a host OS pid to a guest + // pid. + keyPidMapFmt = "pid-%d" +) + +type container struct { + persistedState + ShimPid int + hc *hcs.System + resources *hcsoci.Resources +} + +func startProcessShim(id, pidFile, logFile string, spec *specs.Process) (_ *os.Process, err error) { + // Ensure the stdio handles inherit to the child process. This isn't undone + // after the StartProcess call because the caller never launches another + // process before exiting. 
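The loop that follows marks the three standard handles inheritable; the matching half of that mechanism appears in shim.go later in this patch, where newFile wraps the numeric handle values passed via --stdin/--stdout/--stderr back into *os.File objects. A minimal sketch of both halves in one place, using only the standard library and golang.org/x/sys/windows (illustrative, not part of the patch):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/windows"
    )

    func main() {
        // Parent side: mark stdout inheritable, as the loop below does for all
        // three stdio handles before os.StartProcess launches the shim.
        h := windows.Handle(os.Stdout.Fd())
        if err := windows.SetHandleInformation(h, windows.HANDLE_FLAG_INHERIT, windows.HANDLE_FLAG_INHERIT); err != nil {
            panic(err)
        }

        // Child side (compare newFile in shim.go): the numeric handle value the
        // parent passes on the command line is wrapped back into an *os.File.
        f := os.NewFile(uintptr(h), "inherited-stdout")
        fmt.Fprintln(f, "handle", h, "is inheritable and usable as a file")
    }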
+ for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} { + err = windows.SetHandleInformation(windows.Handle(f.Fd()), windows.HANDLE_FLAG_INHERIT, windows.HANDLE_FLAG_INHERIT) + if err != nil { + return nil, err + } + } + + args := []string{ + "--stdin", strconv.Itoa(int(os.Stdin.Fd())), + "--stdout", strconv.Itoa(int(os.Stdout.Fd())), + "--stderr", strconv.Itoa(int(os.Stderr.Fd())), + } + if spec != nil { + args = append(args, "--exec") + } + if strings.HasPrefix(logFile, runhcs.SafePipePrefix) { + args = append(args, "--log-pipe", logFile) + } + args = append(args, id) + return launchShim("shim", pidFile, logFile, args, spec) +} + +func launchShim(cmd, pidFile, logFile string, args []string, data interface{}) (_ *os.Process, err error) { + executable, err := os.Executable() + if err != nil { + return nil, err + } + + // Create a pipe to use as stderr for the shim process. This is used to + // retrieve early error information, up to the point that the shim is ready + // to launch a process in the container. + rp, wp, err := os.Pipe() + if err != nil { + return nil, err + } + defer rp.Close() + defer wp.Close() + + // Create a pipe to send the data, if one is provided. + var rdatap, wdatap *os.File + if data != nil { + rdatap, wdatap, err = os.Pipe() + if err != nil { + return nil, err + } + defer rdatap.Close() + defer wdatap.Close() + } + + var log *os.File + fullargs := []string{os.Args[0]} + if logFile != "" { + if !strings.HasPrefix(logFile, runhcs.SafePipePrefix) { + log, err = os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666) + if err != nil { + return nil, err + } + defer log.Close() + } + + fullargs = append(fullargs, "--log-format", logFormat) + if logrus.GetLevel() == logrus.DebugLevel { + fullargs = append(fullargs, "--debug") + } + } + fullargs = append(fullargs, cmd) + fullargs = append(fullargs, args...) + attr := &os.ProcAttr{ + Files: []*os.File{rdatap, wp, log}, + } + p, err := os.StartProcess(executable, fullargs, attr) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + p.Kill() + } + }() + + wp.Close() + + // Write the data if provided. + if data != nil { + rdatap.Close() + dataj, err := json.Marshal(data) + if err != nil { + return nil, err + } + _, err = wdatap.Write(dataj) + if err != nil { + return nil, err + } + wdatap.Close() + } + + err = runhcs.GetErrorFromPipe(rp, p) + if err != nil { + return nil, err + } + + if pidFile != "" { + if err = createPidFile(pidFile, p.Pid); err != nil { + return nil, err + } + } + + return p, nil +} + +// parseSandboxAnnotations searches `a` for various annotations used by +// different runtimes to represent a sandbox ID, and sandbox type. +// +// If found returns the tuple `(sandboxID, isSandbox)` where `isSandbox == true` +// indicates the identifer is the sandbox itself; `isSandbox == false` indicates +// the identifer is the sandbox in which to place this container. Otherwise +// returns `("", false)`. 
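The function defined immediately below recognizes the sandbox annotations set by the containerd CRI plugin, CRI-O, and the Docker shim. A usage sketch against the CRI keys it checks (a fragment within this package, values illustrative):

    // Annotations as the containerd CRI plugin would set them on a pod sandbox.
    a := map[string]string{
        "io.kubernetes.cri.container-type": "sandbox",
        "io.kubernetes.cri.sandbox-id":     "pod-1234",
    }
    id, isSandbox := parseSandboxAnnotations(a)
    // id == "pod-1234", isSandbox == true: this container *is* the sandbox.

    // For a workload container in the same pod the type is "container", and the
    // same call returns ("pod-1234", false): run it inside that sandbox's host.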
+func parseSandboxAnnotations(a map[string]string) (string, bool) { + var t, id string + if t = a["io.kubernetes.cri.container-type"]; t != "" { + id = a["io.kubernetes.cri.sandbox-id"] + } else if t = a["io.kubernetes.cri-o.ContainerType"]; t != "" { + id = a["io.kubernetes.cri-o.SandboxID"] + } else if t = a["io.kubernetes.docker.type"]; t != "" { + id = a["io.kubernetes.sandbox.id"] + if t == "podsandbox" { + t = "sandbox" + } + } + if t == "container" { + return id, false + } + if t == "sandbox" { + return id, true + } + return "", false +} + +// parseAnnotationsBool searches `a` for `key` and if found verifies that the +// value is `true` or `false` in any case. If `key` is not found returns `def`. +func parseAnnotationsBool(a map[string]string, key string, def bool) bool { + if v, ok := a[key]; ok { + switch strings.ToLower(v) { + case "true": + return true + case "false": + return false + default: + logrus.WithFields(logrus.Fields{ + logfields.OCIAnnotation: key, + logfields.Value: v, + logfields.ExpectedType: logfields.Bool, + }).Warning("annotation could not be parsed") + } + } + return def +} + +// parseAnnotationsCPU searches `s.Annotations` for the CPU annotation. If +// not found searches `s` for the Windows CPU section. If neither are found +// returns `def`. +func parseAnnotationsCPU(s *specs.Spec, annotation string, def int32) int32 { + if m := parseAnnotationsUint64(s.Annotations, annotation, 0); m != 0 { + return int32(m) + } + if s.Windows != nil && + s.Windows.Resources != nil && + s.Windows.Resources.CPU != nil && + s.Windows.Resources.CPU.Count != nil && + *s.Windows.Resources.CPU.Count > 0 { + return int32(*s.Windows.Resources.CPU.Count) + } + return def +} + +// parseAnnotationsMemory searches `s.Annotations` for the memory annotation. If +// not found searches `s` for the Windows memory section. If neither are found +// returns `def`. +func parseAnnotationsMemory(s *specs.Spec, annotation string, def int32) int32 { + if m := parseAnnotationsUint64(s.Annotations, annotation, 0); m != 0 { + return int32(m) + } + if s.Windows != nil && + s.Windows.Resources != nil && + s.Windows.Resources.Memory != nil && + s.Windows.Resources.Memory.Limit != nil && + *s.Windows.Resources.Memory.Limit > 0 { + return int32(*s.Windows.Resources.Memory.Limit) + } + return def +} + +// parseAnnotationsPreferredRootFSType searches `a` for `key` and verifies that the +// value is in the set of allowed values. If `key` is not found returns `def`. +func parseAnnotationsPreferredRootFSType(a map[string]string, key string, def uvm.PreferredRootFSType) uvm.PreferredRootFSType { + if v, ok := a[key]; ok { + switch v { + case "initrd": + return uvm.PreferredRootFSTypeInitRd + case "vhd": + return uvm.PreferredRootFSTypeVHD + default: + logrus.Warningf("annotation: '%s', with value: '%s' must be 'initrd' or 'vhd'", key, v) + } + } + return def +} + +// parseAnnotationsUint32 searches `a` for `key` and if found verifies that the +// value is a 32 bit unsigned integer. If `key` is not found returns `def`. 
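The integer parsers that follow round out the set. The CPU and memory helpers above fall back from an explicit io.microsoft.* annotation to the spec's Windows resources section and finally to the caller's default, while the simpler helpers fall straight back to the default. A usage sketch within this package (keys match the constants declared later in this file; values illustrative):

    spec := &specs.Spec{
        Annotations: map[string]string{
            "io.microsoft.virtualmachine.computetopology.processor.count":        "2",
            "io.microsoft.virtualmachine.computetopology.memory.allowovercommit": "false",
        },
    }

    cpu := parseAnnotationsCPU(spec, "io.microsoft.virtualmachine.computetopology.processor.count", 1)
    // cpu == 2: the annotation overrides the default of 1.

    overcommit := parseAnnotationsBool(spec.Annotations, "io.microsoft.virtualmachine.computetopology.memory.allowovercommit", true)
    // overcommit == false: a present annotation beats the default of true.

    mem := parseAnnotationsMemory(spec, "io.microsoft.virtualmachine.computetopology.memory.sizeinmb", 1024)
    // mem == 1024: no annotation and no Windows memory limit, so the default is kept.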
+func parseAnnotationsUint32(a map[string]string, key string, def uint32) uint32 { + if v, ok := a[key]; ok { + countu, err := strconv.ParseUint(v, 10, 32) + if err == nil { + v := uint32(countu) + return v + } + logrus.WithFields(logrus.Fields{ + logfields.OCIAnnotation: key, + logfields.Value: v, + logfields.ExpectedType: logfields.Uint32, + logrus.ErrorKey: err, + }).Warning("annotation could not be parsed") + } + return def +} + +// parseAnnotationsUint64 searches `a` for `key` and if found verifies that the +// value is a 64 bit unsigned integer. If `key` is not found returns `def`. +func parseAnnotationsUint64(a map[string]string, key string, def uint64) uint64 { + if v, ok := a[key]; ok { + countu, err := strconv.ParseUint(v, 10, 64) + if err == nil { + return countu + } + logrus.WithFields(logrus.Fields{ + logfields.OCIAnnotation: key, + logfields.Value: v, + logfields.ExpectedType: logfields.Uint64, + logrus.ErrorKey: err, + }).Warning("annotation could not be parsed") + } + return def +} + +// startVMShim starts a vm-shim command with the specified `opts`. `opts` can be `uvm.OptionsWCOW` or `uvm.OptionsLCOW` +func (c *container) startVMShim(logFile string, opts interface{}) (*os.Process, error) { + var os string + if _, ok := opts.(*uvm.OptionsLCOW); ok { + os = "linux" + } else { + os = "windows" + } + args := []string{"--os", os} + if strings.HasPrefix(logFile, runhcs.SafePipePrefix) { + args = append(args, "--log-pipe", logFile) + } + args = append(args, c.VMPipePath()) + return launchShim("vmshim", "", logFile, args, opts) +} + +type containerConfig struct { + ID string + Owner string + HostID string + PidFile string + ShimLogFile, VMLogFile string + Spec *specs.Spec + VMConsolePipe string +} + +func createContainer(cfg *containerConfig) (_ *container, err error) { + // Store the container information in a volatile registry key. + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + + vmisolated := cfg.Spec.Linux != nil || (cfg.Spec.Windows != nil && cfg.Spec.Windows.HyperV != nil) + + sandboxID, isSandbox := parseSandboxAnnotations(cfg.Spec.Annotations) + hostID := cfg.HostID + if isSandbox { + if sandboxID != cfg.ID { + return nil, errors.New("sandbox ID must match ID") + } + } else if sandboxID != "" { + // Validate that the sandbox container exists. + sandbox, err := getContainer(sandboxID, false) + if err != nil { + return nil, err + } + defer sandbox.Close() + if sandbox.SandboxID != sandboxID { + return nil, fmt.Errorf("container %s is not a sandbox", sandboxID) + } + if hostID == "" { + // Use the sandbox's host. 
+ hostID = sandbox.HostID + } else if sandbox.HostID == "" { + return nil, fmt.Errorf("sandbox container %s is not running in a VM host, but host %s was specified", sandboxID, hostID) + } else if hostID != sandbox.HostID { + return nil, fmt.Errorf("sandbox container %s has a different host %s from the requested host %s", sandboxID, sandbox.HostID, hostID) + } + if vmisolated && hostID == "" { + return nil, fmt.Errorf("container %s is not a VM isolated sandbox", sandboxID) + } + } + + uniqueID := guid.New() + + newvm := false + var hostUniqueID guid.GUID + if hostID != "" { + host, err := getContainer(hostID, false) + if err != nil { + return nil, err + } + defer host.Close() + if !host.IsHost { + return nil, fmt.Errorf("host container %s is not a VM host", hostID) + } + hostUniqueID = host.UniqueID + } else if vmisolated && (isSandbox || cfg.Spec.Linux != nil || osversion.Get().Build >= osversion.RS5) { + // This handles all LCOW, Pod Sandbox, and (Windows Xenon V2 for RS5+) + hostID = cfg.ID + newvm = true + hostUniqueID = uniqueID + } + + // Make absolute the paths in Root.Path and Windows.LayerFolders. + rootfs := "" + if cfg.Spec.Root != nil { + rootfs = cfg.Spec.Root.Path + if rootfs != "" && !filepath.IsAbs(rootfs) && !strings.HasPrefix(rootfs, `\\?\`) { + rootfs = filepath.Join(cwd, rootfs) + cfg.Spec.Root.Path = rootfs + } + } + + netNS := "" + if cfg.Spec.Windows != nil { + for i, f := range cfg.Spec.Windows.LayerFolders { + if !filepath.IsAbs(f) && !strings.HasPrefix(rootfs, `\\?\`) { + cfg.Spec.Windows.LayerFolders[i] = filepath.Join(cwd, f) + } + } + + // Determine the network namespace to use. + if cfg.Spec.Windows.Network != nil { + if cfg.Spec.Windows.Network.NetworkSharedContainerName != "" { + // RS4 case + err = stateKey.Get(cfg.Spec.Windows.Network.NetworkSharedContainerName, keyNetNS, &netNS) + if err != nil { + if _, ok := err.(*regstate.NoStateError); !ok { + return nil, err + } + } + } else if cfg.Spec.Windows.Network.NetworkNamespace != "" { + // RS5 case + netNS = cfg.Spec.Windows.Network.NetworkNamespace + } + } + } + + // Store the initial container state in the registry so that the delete + // command can clean everything up if something goes wrong. + c := &container{ + persistedState: persistedState{ + ID: cfg.ID, + Owner: cfg.Owner, + Bundle: cwd, + Rootfs: rootfs, + Created: time.Now(), + Spec: cfg.Spec, + SandboxID: sandboxID, + HostID: hostID, + IsHost: newvm, + RequestedNetNS: netNS, + UniqueID: uniqueID, + HostUniqueID: hostUniqueID, + }, + } + err = stateKey.Create(cfg.ID, keyState, &c.persistedState) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + c.Remove() + } + }() + if isSandbox && vmisolated { + cnicfg := cni.NewPersistedNamespaceConfig(netNS, cfg.ID, hostUniqueID) + err = cnicfg.Store() + if err != nil { + return nil, err + } + defer func() { + if err != nil { + cnicfg.Remove() + } + }() + } + + // Start a VM if necessary. 
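The block that follows reads a set of io.microsoft.virtualmachine.* annotations (the constants are declared at the top of the block) to size and configure the utility VM. From the user's side these arrive through the standard annotations map of the bundle's config.json; a fragment might look like the following, with every value given as a string and all numbers purely illustrative:

    "annotations": {
        "io.microsoft.virtualmachine.computetopology.memory.sizeinmb": "2048",
        "io.microsoft.virtualmachine.computetopology.memory.allowovercommit": "true",
        "io.microsoft.virtualmachine.computetopology.processor.count": "2",
        "io.microsoft.virtualmachine.lcow.preferredrootfstype": "vhd"
    }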
+ if newvm { + var opts interface{} + + const ( + annotationAllowOvercommit = "io.microsoft.virtualmachine.computetopology.memory.allowovercommit" + annotationEnableDeferredCommit = "io.microsoft.virtualmachine.computetopology.memory.enabledeferredcommit" + annotationMemorySizeInMB = "io.microsoft.virtualmachine.computetopology.memory.sizeinmb" + annotationProcessorCount = "io.microsoft.virtualmachine.computetopology.processor.count" + annotationVPMemCount = "io.microsoft.virtualmachine.devices.virtualpmem.maximumcount" + annotationVPMemSize = "io.microsoft.virtualmachine.devices.virtualpmem.maximumsizebytes" + annotationPreferredRootFSType = "io.microsoft.virtualmachine.lcow.preferredrootfstype" + ) + + if cfg.Spec.Linux != nil { + lopts := uvm.NewDefaultOptionsLCOW(vmID(c.ID), cfg.Owner) + lopts.MemorySizeInMB = parseAnnotationsMemory(cfg.Spec, annotationMemorySizeInMB, lopts.MemorySizeInMB) + lopts.AllowOvercommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationAllowOvercommit, lopts.AllowOvercommit) + lopts.EnableDeferredCommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationEnableDeferredCommit, lopts.EnableDeferredCommit) + lopts.ProcessorCount = parseAnnotationsCPU(cfg.Spec, annotationProcessorCount, lopts.ProcessorCount) + lopts.ConsolePipe = cfg.VMConsolePipe + lopts.VPMemDeviceCount = parseAnnotationsUint32(cfg.Spec.Annotations, annotationVPMemCount, lopts.VPMemDeviceCount) + lopts.VPMemSizeBytes = parseAnnotationsUint64(cfg.Spec.Annotations, annotationVPMemSize, lopts.VPMemSizeBytes) + lopts.PreferredRootFSType = parseAnnotationsPreferredRootFSType(cfg.Spec.Annotations, annotationPreferredRootFSType, lopts.PreferredRootFSType) + switch lopts.PreferredRootFSType { + case uvm.PreferredRootFSTypeInitRd: + lopts.RootFSFile = uvm.InitrdFile + case uvm.PreferredRootFSTypeVHD: + lopts.RootFSFile = uvm.VhdFile + } + opts = lopts + } else { + wopts := uvm.NewDefaultOptionsWCOW(vmID(c.ID), cfg.Owner) + wopts.MemorySizeInMB = parseAnnotationsMemory(cfg.Spec, annotationMemorySizeInMB, wopts.MemorySizeInMB) + wopts.AllowOvercommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationAllowOvercommit, wopts.AllowOvercommit) + wopts.EnableDeferredCommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationEnableDeferredCommit, wopts.EnableDeferredCommit) + wopts.ProcessorCount = parseAnnotationsCPU(cfg.Spec, annotationProcessorCount, wopts.ProcessorCount) + + // In order for the UVM sandbox.vhdx not to collide with the actual + // nested Argon sandbox.vhdx we append the \vm folder to the last entry + // in the list. + layersLen := len(cfg.Spec.Windows.LayerFolders) + layers := make([]string, layersLen) + copy(layers, cfg.Spec.Windows.LayerFolders) + + vmPath := filepath.Join(layers[layersLen-1], "vm") + err := os.MkdirAll(vmPath, 0) + if err != nil { + return nil, err + } + layers[layersLen-1] = vmPath + + wopts.LayerFolders = layers + opts = wopts + } + + shim, err := c.startVMShim(cfg.VMLogFile, opts) + if err != nil { + return nil, err + } + shim.Release() + } + + if c.HostID != "" { + // Call to the VM shim process to create the container. This is done so + // that the VM process can keep track of the VM's virtual hardware + // resource use. + err = c.issueVMRequest(runhcs.OpCreateContainer) + if err != nil { + return nil, err + } + c.hc, err = hcs.OpenComputeSystem(cfg.ID) + if err != nil { + return nil, err + } + } else { + // Create the container directly from this process. 
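One detail worth calling out from the WCOW branch above is the scratch-layer handling: the utility VM gets a copy of the layer list whose last entry is redirected into a "vm" subfolder, so the UVM's own sandbox.vhdx cannot collide with the nested container's. A standalone sketch of just that path manipulation (paths illustrative):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Illustrative stand-in for Spec.Windows.LayerFolders.
        layers := []string{
            `C:\layers\base`,
            `C:\layers\scratch`, // last entry: the container's scratch folder
        }

        vmLayers := make([]string, len(layers))
        copy(vmLayers, layers)
        vmLayers[len(vmLayers)-1] = filepath.Join(vmLayers[len(vmLayers)-1], "vm")

        fmt.Println(vmLayers) // [C:\layers\base C:\layers\scratch\vm]
    }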
+ err = createContainerInHost(c, nil) + if err != nil { + return nil, err + } + } + + // Create the shim process for the container. + err = startContainerShim(c, cfg.PidFile, cfg.ShimLogFile) + if err != nil { + if e := c.Kill(); e == nil { + c.Remove() + } + return nil, err + } + + return c, nil +} + +func (c *container) ShimPipePath() string { + return runhcs.SafePipePath("runhcs-shim-" + c.UniqueID.String()) +} + +func (c *container) VMPipePath() string { + return runhcs.VMPipePath(c.HostUniqueID) +} + +func (c *container) VMIsolated() bool { + return c.HostID != "" +} + +func (c *container) unmountInHost(vm *uvm.UtilityVM, all bool) error { + resources := &hcsoci.Resources{} + err := stateKey.Get(c.ID, keyResources, resources) + if _, ok := err.(*regstate.NoStateError); ok { + return nil + } + if err != nil { + return err + } + err = hcsoci.ReleaseResources(resources, vm, all) + if err != nil { + stateKey.Set(c.ID, keyResources, resources) + return err + } + + err = stateKey.Clear(c.ID, keyResources) + if err != nil { + return err + } + return nil +} + +func (c *container) Unmount(all bool) error { + if c.VMIsolated() { + op := runhcs.OpUnmountContainerDiskOnly + if all { + op = runhcs.OpUnmountContainer + } + err := c.issueVMRequest(op) + if err != nil { + if _, ok := err.(*noVMError); ok { + logrus.WithFields(logrus.Fields{ + logfields.ContainerID: c.ID, + logfields.UVMID: c.HostID, + logrus.ErrorKey: errors.New("failed to unmount container resources"), + }).Warning("VM shim could not be contacted") + } else { + return err + } + } + } else { + c.unmountInHost(nil, false) + } + return nil +} + +func createContainerInHost(c *container, vm *uvm.UtilityVM) (err error) { + if c.hc != nil { + return errors.New("container already created") + } + + // Create the container without starting it. + opts := &hcsoci.CreateOptions{ + ID: c.ID, + Owner: c.Owner, + Spec: c.Spec, + HostingSystem: vm, + NetworkNamespace: c.RequestedNetNS, + } + vmid := "" + if vm != nil { + vmid = vm.ID() + } + logrus.WithFields(logrus.Fields{ + logfields.ContainerID: c.ID, + logfields.UVMID: vmid, + }).Info("creating container in UVM") + hc, resources, err := hcsoci.CreateContainer(opts) + if err != nil { + return err + } + defer func() { + if err != nil { + hc.Terminate() + hc.Wait() + hcsoci.ReleaseResources(resources, vm, true) + } + }() + + // Record the network namespace to support namespace sharing by container ID. + if resources.NetNS() != "" { + err = stateKey.Set(c.ID, keyNetNS, resources.NetNS()) + if err != nil { + return err + } + } + + err = stateKey.Set(c.ID, keyResources, resources) + if err != nil { + return err + } + c.hc = hc + return nil +} + +func startContainerShim(c *container, pidFile, logFile string) error { + // Launch a shim process to later execute a process in the container. + shim, err := startProcessShim(c.ID, pidFile, logFile, nil) + if err != nil { + return err + } + defer shim.Release() + defer func() { + if err != nil { + shim.Kill() + } + }() + + c.ShimPid = shim.Pid + err = stateKey.Set(c.ID, keyShimPid, shim.Pid) + if err != nil { + return err + } + + if pidFile != "" { + if err = createPidFile(pidFile, shim.Pid); err != nil { + return err + } + } + + return nil +} + +func (c *container) Close() error { + if c.hc == nil { + return nil + } + return c.hc.Close() +} + +func (c *container) Exec() error { + err := c.hc.Start() + if err != nil { + return err + } + + if c.Spec.Process == nil { + return nil + } + + // Alert the shim that the container is ready. 
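The DialPipe call that follows connects to the pipe named by ShimPipePath above. The exact prefix comes from the internal runhcs package; assuming SafePipePath prepends the ACL-protected pipe namespace that the --shim-log and --vm-log flag help strings in this patch use, the shim pipe for a container looks roughly like this sketch (assumption, not confirmed by the patch):

    // Assumption: SafePipePath prepends the protected-pipe prefix seen in the
    // CLI help strings of this patch; the suffix is "runhcs-shim-" + UniqueID.
    const assumedPrefix = `\\.\pipe\ProtectedPrefix\Administrators\`
    shimPipe := assumedPrefix + "runhcs-shim-" + c.UniqueID.String()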
+ pipe, err := winio.DialPipe(c.ShimPipePath(), nil) + if err != nil { + return err + } + defer pipe.Close() + + shim, err := os.FindProcess(c.ShimPid) + if err != nil { + return err + } + defer shim.Release() + + err = runhcs.GetErrorFromPipe(pipe, shim) + if err != nil { + return err + } + + return nil +} + +func getContainer(id string, notStopped bool) (*container, error) { + var c container + err := stateKey.Get(id, keyState, &c.persistedState) + if err != nil { + return nil, err + } + err = stateKey.Get(id, keyShimPid, &c.ShimPid) + if err != nil { + if _, ok := err.(*regstate.NoStateError); !ok { + return nil, err + } + c.ShimPid = -1 + } + if notStopped && c.ShimPid == 0 { + return nil, errContainerStopped + } + + hc, err := hcs.OpenComputeSystem(c.ID) + if err == nil { + c.hc = hc + } else if !hcs.IsNotExist(err) { + return nil, err + } else if notStopped { + return nil, errContainerStopped + } + + return &c, nil +} + +func (c *container) Remove() error { + // Unmount any layers or mapped volumes. + err := c.Unmount(!c.IsHost) + if err != nil { + return err + } + + // Follow kata's example and delay tearing down the VM until the owning + // container is removed. + if c.IsHost { + vm, err := hcs.OpenComputeSystem(vmID(c.ID)) + if err == nil { + if err := vm.Terminate(); hcs.IsPending(err) { + vm.Wait() + } + } + } + return stateKey.Remove(c.ID) +} + +func (c *container) Kill() error { + if c.hc == nil { + return nil + } + err := c.hc.Terminate() + if hcs.IsPending(err) { + err = c.hc.Wait() + } + if hcs.IsAlreadyStopped(err) { + err = nil + } + return err +} + +func (c *container) Status() (containerStatus, error) { + if c.hc == nil || c.ShimPid == 0 { + return containerStopped, nil + } + props, err := c.hc.Properties() + if err != nil { + if !strings.Contains(err.Error(), "operation is not valid in the current state") { + return "", err + } + return containerUnknown, nil + } + state := containerUnknown + switch props.State { + case "", "Created": + state = containerCreated + case "Running": + state = containerRunning + case "Paused": + state = containerPaused + case "Stopped": + state = containerStopped + } + return state, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create-scratch.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create-scratch.go new file mode 100644 index 00000000..17af1ed0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create-scratch.go @@ -0,0 +1,76 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/lcow" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + gcsclient "github.com/Microsoft/opengcs/client" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var createScratchCommand = cli.Command{ + Name: "create-scratch", + Usage: "creates a scratch vhdx at 'destpath' that is ext4 formatted", + Description: "Creates a scratch vhdx at 'destpath' that is ext4 formatted", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "destpath", + Usage: "Required: describes the destination vhd path", + }, + }, + Before: appargs.Validate(), + Action: func(context *cli.Context) error { + dest := context.String("destpath") + if dest == "" { + return errors.New("'destpath' is required") + } + + // If we only have v1 lcow support do it the old way. 
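Regardless of which branch below runs (the pre-RS5 opengcs client path or the RS5+ utility-VM path), the command is driven the same way from the CLI; an invocation might look like this, with the destination path purely illustrative:

    # runhcs create-scratch --destpath C:\ContainerData\scratch.vhdx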
+ if osversion.Get().Build < osversion.RS5 { + cfg := gcsclient.Config{ + Options: gcsclient.Options{ + KirdPath: filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers"), + KernelFile: "kernel", + InitrdFile: uvm.InitrdFile, + }, + Name: "createscratch-uvm", + UvmTimeoutSeconds: 5 * 60, // 5 Min + } + + if err := cfg.StartUtilityVM(); err != nil { + return errors.Wrapf(err, "failed to start '%s'", cfg.Name) + } + defer cfg.Uvm.Terminate() + + if err := cfg.CreateExt4Vhdx(dest, lcow.DefaultScratchSizeGB, ""); err != nil { + return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", cfg.Name) + } + } else { + opts := uvm.NewDefaultOptionsLCOW("createscratch-uvm", context.GlobalString("owner")) + + // 256MB with boot from vhd supported. + opts.MemorySizeInMB = 256 + opts.VPMemDeviceCount = 1 + + convertUVM, err := uvm.CreateLCOW(opts) + if err != nil { + return errors.Wrapf(err, "failed to create '%s'", opts.ID) + } + defer convertUVM.Close() + if err := convertUVM.Start(); err != nil { + return errors.Wrapf(err, "failed to start '%s'", opts.ID) + } + + if err := lcow.CreateScratch(convertUVM, dest, lcow.DefaultScratchSizeGB, "", ""); err != nil { + return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", opts.ID) + } + } + + return nil + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create.go new file mode 100644 index 00000000..5eb5b9e6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/create.go @@ -0,0 +1,100 @@ +package main + +import ( + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/urfave/cli" +) + +var createRunFlags = []cli.Flag{ + cli.StringFlag{ + Name: "bundle, b", + Value: "", + Usage: `path to the root of the bundle directory, defaults to the current directory`, + }, + cli.StringFlag{ + Name: "pid-file", + Value: "", + Usage: "specify the file to write the process id to", + }, + cli.StringFlag{ + Name: "shim-log", + Value: "", + Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--shim-log) for the launched shim process`, + }, + cli.StringFlag{ + Name: "vm-log", + Value: "", + Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--vm-log) for the launched VM shim process`, + }, + cli.StringFlag{ + Name: "vm-console", + Value: "", + Usage: `path to the pipe for the VM's console (e.g. \\.\pipe\debugpipe)`, + }, + cli.StringFlag{ + Name: "host", + Value: "", + Usage: "host container whose VM this container should run in", + }, +} + +var createCommand = cli.Command{ + Name: "create", + Usage: "create a container", + ArgsUsage: ` + +Where "" is your name for the instance of the container that you +are starting. The name you provide for the container instance must be unique on +your host.`, + Description: `The create command creates an instance of a container for a bundle. The bundle +is a directory with a specification file named "` + specConfig + `" and a root +filesystem. + +The specification file includes an args parameter. The args parameter is used +to specify command(s) that get run when the container is started. To change the +command(s) that get executed on start, edit the args parameter of the spec. 
See +"runc spec --help" for more explanation.`, + Flags: append(createRunFlags), + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + cfg, err := containerConfigFromContext(context) + if err != nil { + return err + } + _, err = createContainer(cfg) + if err != nil { + return err + } + return nil + }, +} + +func containerConfigFromContext(context *cli.Context) (*containerConfig, error) { + id := context.Args().First() + pidFile, err := absPathOrEmpty(context.String("pid-file")) + if err != nil { + return nil, err + } + shimLog, err := absPathOrEmpty(context.String("shim-log")) + if err != nil { + return nil, err + } + vmLog, err := absPathOrEmpty(context.String("vm-log")) + if err != nil { + return nil, err + } + spec, err := setupSpec(context) + if err != nil { + return nil, err + } + return &containerConfig{ + ID: id, + Owner: context.GlobalString("owner"), + PidFile: pidFile, + ShimLogFile: shimLog, + VMLogFile: vmLog, + VMConsolePipe: context.String("vm-console"), + Spec: spec, + HostID: context.String("host"), + }, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/delete.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/delete.go new file mode 100644 index 00000000..cebea043 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/delete.go @@ -0,0 +1,73 @@ +package main + +import ( + "fmt" + "os" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/regstate" + "github.com/urfave/cli" +) + +var deleteCommand = cli.Command{ + Name: "delete", + Usage: "delete any resources held by the container often used with detached container", + ArgsUsage: ` + +Where "" is the name for the instance of the container. + +EXAMPLE: +For example, if the container id is "ubuntu01" and runhcs list currently shows the +status of "ubuntu01" as "stopped" the following will delete resources held for +"ubuntu01" removing "ubuntu01" from the runhcs list of containers: + + # runhcs delete ubuntu01`, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "force, f", + Usage: "Forcibly deletes the container if it is still running (uses SIGKILL)", + }, + }, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + id := context.Args().First() + force := context.Bool("force") + container, err := getContainer(id, false) + if err != nil { + if _, ok := err.(*regstate.NoStateError); ok { + if e := stateKey.Remove(id); e != nil { + fmt.Fprintf(os.Stderr, "remove %s: %v\n", id, e) + } + if force { + return nil + } + } + return err + } + defer container.Close() + s, err := container.Status() + if err != nil { + return err + } + + kill := false + switch s { + case containerStopped: + case containerCreated: + kill = true + default: + if !force { + return fmt.Errorf("cannot delete container %s that is not stopped: %s\n", id, s) + } + kill = true + } + + if kill { + err = container.Kill() + if err != nil { + return err + } + } + return container.Remove() + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/exec.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/exec.go new file mode 100644 index 00000000..befc79f0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/exec.go @@ -0,0 +1,160 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/urfave/cli" +) + +var execCommand = cli.Command{ + Name: "exec", + Usage: "execute 
new process inside the container", + ArgsUsage: ` [command options] || -p process.json + +Where "" is the name for the instance of the container and +"" is the command to be executed in the container. +"" can't be empty unless a "-p" flag provided. + +EXAMPLE: +For example, if the container is configured to run the linux ps command the +following will output a list of processes running in the container: + + # runhcs exec ps`, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "cwd", + Usage: "current working directory in the container", + }, + cli.StringSliceFlag{ + Name: "env, e", + Usage: "set environment variables", + }, + cli.BoolFlag{ + Name: "tty, t", + Usage: "allocate a pseudo-TTY", + }, + cli.StringFlag{ + Name: "user, u", + }, + cli.StringFlag{ + Name: "process, p", + Usage: "path to the process.json", + }, + cli.BoolFlag{ + Name: "detach,d", + Usage: "detach from the container's process", + }, + cli.StringFlag{ + Name: "pid-file", + Value: "", + Usage: "specify the file to write the process id to", + }, + cli.StringFlag{ + Name: "shim-log", + Value: "", + Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs---log) for the launched shim process`, + }, + }, + Before: appargs.Validate(argID, appargs.Rest(appargs.String)), + Action: func(context *cli.Context) error { + id := context.Args().First() + pidFile, err := absPathOrEmpty(context.String("pid-file")) + if err != nil { + return err + } + shimLog, err := absPathOrEmpty(context.String("shim-log")) + if err != nil { + return err + } + c, err := getContainer(id, false) + if err != nil { + return err + } + defer c.Close() + status, err := c.Status() + if err != nil { + return err + } + if status != containerRunning { + return errContainerStopped + } + spec, err := getProcessSpec(context, c) + if err != nil { + return err + } + p, err := startProcessShim(id, pidFile, shimLog, spec) + if err != nil { + return err + } + if !context.Bool("detach") { + state, err := p.Wait() + if err != nil { + return err + } + os.Exit(int(state.Sys().(syscall.WaitStatus).ExitCode)) + } + return nil + }, + SkipArgReorder: true, +} + +func getProcessSpec(context *cli.Context, c *container) (*specs.Process, error) { + if path := context.String("process"); path != "" { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + var p specs.Process + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + return &p, validateProcessSpec(&p) + } + + // process via cli flags + p := c.Spec.Process + + if len(context.Args()) == 1 { + return nil, fmt.Errorf("process args cannot be empty") + } + p.Args = context.Args()[1:] + // override the cwd, if passed + if context.String("cwd") != "" { + p.Cwd = context.String("cwd") + } + // append the passed env variables + p.Env = append(p.Env, context.StringSlice("env")...) + + // set the tty + if context.IsSet("tty") { + p.Terminal = context.Bool("tty") + } + // override the user, if passed + if context.String("user") != "" { + p.User.Username = context.String("user") + } + return p, nil +} + +func validateProcessSpec(spec *specs.Process) error { + if spec.Cwd == "" { + return fmt.Errorf("Cwd property must not be empty") + } + // IsAbs doesnt recognize Unix paths on Windows builds so handle that case + // here. 
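Both requirements enforced here (a non-empty, absolute Cwd and a non-empty Args list) are easy to satisfy from a process.json passed with `runhcs exec -p`. A sketch that builds one with the same specs-go types imported above (values illustrative):

    p := specs.Process{
        Terminal: false,
        Cwd:      "/", // LCOW; for a Windows container use an absolute path such as C:\
        Args:     []string{"ps", "aux"},
        Env:      []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
    }
    b, _ := json.MarshalIndent(p, "", "  ")
    // Write b to process.json, then: runhcs exec -p process.json <container-id>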
+ if !filepath.IsAbs(spec.Cwd) && !strings.HasPrefix(spec.Cwd, "/") { + return fmt.Errorf("Cwd must be an absolute path") + } + if len(spec.Args) == 0 { + return fmt.Errorf("args must not be empty") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill.go new file mode 100644 index 00000000..2b713e01 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill.go @@ -0,0 +1,193 @@ +package main + +import ( + "strconv" + "strings" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/osversion" + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var killCommand = cli.Command{ + Name: "kill", + Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process", + ArgsUsage: ` [signal] + +Where "" is the name for the instance of the container and +"[signal]" is the signal to be sent to the init process. + +EXAMPLE: +For example, if the container id is "ubuntu01" the following will send a "KILL" +signal to the init process of the "ubuntu01" container: + + # runhcs kill ubuntu01 KILL`, + Flags: []cli.Flag{}, + Before: appargs.Validate(argID, appargs.Optional(appargs.String)), + Action: func(context *cli.Context) error { + id := context.Args().First() + c, err := getContainer(id, true) + if err != nil { + return err + } + defer c.Close() + status, err := c.Status() + if err != nil { + return err + } + if status != containerRunning { + return errContainerStopped + } + + signalsSupported := false + + // The Signal feature was added in RS5 + if osversion.Get().Build >= osversion.RS5 { + if c.IsHost || c.HostID != "" { + var hostID string + if c.IsHost { + // This is the LCOW, Pod Sandbox, or Windows Xenon V2 for RS5+ + hostID = vmID(c.ID) + } else { + // This is the Nth container in a Pod + hostID = c.HostID + } + uvm, err := hcs.OpenComputeSystem(hostID) + if err != nil { + return err + } + defer uvm.Close() + if props, err := uvm.Properties(schema1.PropertyTypeGuestConnection); err == nil && + props.GuestConnectionInfo.GuestDefinedCapabilities.SignalProcessSupported { + signalsSupported = true + } + } else if c.Spec.Linux == nil && c.Spec.Windows.HyperV == nil { + // RS5+ Windows Argon + signalsSupported = true + } + } + + signal := 0 + if signalsSupported { + signal, err = validateSigstr(context.Args().Get(1), signalsSupported, c.Spec.Linux != nil) + if err != nil { + return err + } + } + + var pid int + if err := stateKey.Get(id, keyInitPid, &pid); err != nil { + return err + } + + p, err := c.hc.OpenProcess(pid) + if err != nil { + return err + } + defer p.Close() + + if signalsSupported && (c.Spec.Linux != nil || !c.Spec.Process.Terminal) { + opts := guestrequest.SignalProcessOptions{ + Signal: signal, + } + return p.Signal(opts) + } + + // Legacy signal issue a kill + return p.Kill() + }, +} + +func validateSigstr(sigstr string, signalsSupported bool, isLcow bool) (int, error) { + errInvalidSignal := errors.Errorf("invalid signal '%s'", sigstr) + + // All flavors including legacy default to SIGTERM on LCOW CtrlC on Windows + if sigstr == "" { + if isLcow { + return 0xf, nil + } + return 0, nil + } + + sigstr = strings.ToUpper(sigstr) + + if !signalsSupported { + // If signals arent supported we just validate that its a known signal. 
+ // We already return 0 since we only supported a platform Kill() at that + // time. + if isLcow { + switch sigstr { + case "15": + fallthrough + case "TERM": + fallthrough + case "SIGTERM": + return 0, nil + default: + return 0, errInvalidSignal + } + } + switch sigstr { + // Docker sends a UNIX term in the supported Windows Signal map. + case "15": + fallthrough + case "TERM": + fallthrough + case "0": + fallthrough + case "CTRLC": + return 0, nil + case "9": + fallthrough + case "KILL": + return 0, nil + default: + return 0, errInvalidSignal + } + } else { + if !isLcow { + // Docker sends the UNIX signal name or value. Convert them to the + // correct Windows signals. + switch sigstr { + case "15": + fallthrough + case "TERM": + return 0x0, nil // Convert to CTRLC + case "9": + fallthrough + case "KILL": + return 0x6, nil // Convert to CTRLSHUTDOWN + } + } + } + + var sigmap map[string]int + if isLcow { + sigmap = signalMapLcow + } else { + sigmap = signalMapWindows + } + + signal, err := strconv.Atoi(sigstr) + if err != nil { + // Signal might still match the string value + for k, v := range sigmap { + if k == sigstr { + return v, nil + } + } + return 0, errInvalidSignal + } + + // Match signal by value + for _, v := range sigmap { + if signal == v { + return signal, nil + } + } + return 0, errInvalidSignal +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill_test.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill_test.go new file mode 100644 index 00000000..4cef065e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/kill_test.go @@ -0,0 +1,111 @@ +package main + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func runValidateSigstrTest(sigstr string, signalsSupported, isLcow bool, + expectedSignal int, expectedError bool, t *testing.T) { + signal, err := validateSigstr(sigstr, signalsSupported, isLcow) + if expectedError { + if err == nil { + t.Fatalf("Expected err: %v, got: nil", expectedError) + } else if err.Error() != fmt.Sprintf("invalid signal '%s'", sigstr) { + t.Fatalf("Expected err: %v, got: %v", expectedError, err) + } + } + if signal != expectedSignal { + t.Fatalf("Test - Signal: %s, Support: %v, LCOW: %v\nExpected signal: %v, got: %v", + sigstr, signalsSupported, isLcow, + expectedSignal, signal) + } +} + +func Test_ValidateSigstr_Empty(t *testing.T) { + runValidateSigstrTest("", false, false, 0, false, t) + runValidateSigstrTest("", false, true, 0xf, false, t) + runValidateSigstrTest("", true, false, 0, false, t) + runValidateSigstrTest("", true, true, 0xf, false, t) +} + +func Test_ValidateSigstr_LCOW_NoSignalSupport_Default(t *testing.T) { + runValidateSigstrTest("15", false, true, 0, false, t) + runValidateSigstrTest("TERM", false, true, 0, false, t) + runValidateSigstrTest("SIGTERM", false, true, 0, false, t) +} + +func Test_ValidateSigstr_LCOW_NoSignalSupport_Default_Invalid(t *testing.T) { + runValidateSigstrTest("2", false, true, 0, true, t) + runValidateSigstrTest("test", false, true, 0, true, t) +} + +func Test_ValidateSigstr_WCOW_NoSignalSupport_Default(t *testing.T) { + runValidateSigstrTest("15", false, false, 0, false, t) + runValidateSigstrTest("TERM", false, false, 0, false, t) + runValidateSigstrTest("0", false, false, 0, false, t) + runValidateSigstrTest("CTRLC", false, false, 0, false, t) + runValidateSigstrTest("9", false, false, 0, false, t) + runValidateSigstrTest("KILL", false, false, 0, false, t) +} + +func Test_ValidateSigstr_WCOW_NoSignalSupport_Default_Invalid(t *testing.T) { + 
runValidateSigstrTest("2", false, false, 0, true, t) + runValidateSigstrTest("test", false, false, 0, true, t) +} + +func Test_ValidateSigstr_LCOW_SignalSupport_SignalNames(t *testing.T) { + for k, v := range signalMapLcow { + runValidateSigstrTest(k, true, true, v, false, t) + // run it again with a case not in the map + lc := strings.ToLower(k) + if k == lc { + t.Fatalf("Expected lower casing - map: %v, got: %v", k, lc) + } + runValidateSigstrTest(lc, true, true, v, false, t) + } +} + +func Test_ValidateSigstr_WCOW_SignalSupport_SignalNames(t *testing.T) { + for k, v := range signalMapWindows { + runValidateSigstrTest(k, true, false, v, false, t) + // run it again with a case not in the map + lc := strings.ToLower(k) + if k == lc { + t.Fatalf("Expected lower casing - map: %v, got: %v", k, lc) + } + runValidateSigstrTest(lc, true, false, v, false, t) + } +} + +func Test_ValidateSigstr_LCOW_SignalSupport_SignalValues(t *testing.T) { + for _, v := range signalMapLcow { + str := strconv.Itoa(v) + runValidateSigstrTest(str, true, true, v, false, t) + } +} + +func Test_ValidateSigstr_WCOW_SignalSupport_SignalValues(t *testing.T) { + for _, v := range signalMapWindows { + str := strconv.Itoa(v) + runValidateSigstrTest(str, true, false, v, false, t) + } +} + +func Test_ValidateSigstr_WCOW_SignalSupport_Docker_SignalNames(t *testing.T) { + // Docker KILL -> CTRLSHUTDOWN when signals are supported + runValidateSigstrTest("KILL", true, false, 0x6, false, t) + + // Docker TERM -> CTRLSHUTDOWN when signals are supported + runValidateSigstrTest("TERM", true, false, 0x0, false, t) +} + +func Test_ValidateSigstr_WCOW_SignalSupport_Docker_SignalValues(t *testing.T) { + // Docker KILL -> CTRLSHUTDOWN when signals are supported + runValidateSigstrTest("9", true, false, 0x6, false, t) + + // Docker TERM -> CTRLSHUTDOWN when signals are supported + runValidateSigstrTest("15", true, false, 0x0, false, t) +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/list.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/list.go new file mode 100644 index 00000000..a1b1a755 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/list.go @@ -0,0 +1,116 @@ +package main + +import ( + "fmt" + "os" + "text/tabwriter" + "time" + + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/urfave/cli" +) + +const formatOptions = `table or json` + +var listCommand = cli.Command{ + Name: "list", + Usage: "lists containers started by runhcs with the given root", + ArgsUsage: ` + +Where the given root is specified via the global option "--root" +(default: "/run/runhcs"). 
+ +EXAMPLE 1: +To list containers created via the default "--root": + # runhcs list + +EXAMPLE 2: +To list containers created using a non-default value for "--root": + # runhcs --root value list`, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "format, f", + Value: "table", + Usage: `select one of: ` + formatOptions, + }, + cli.BoolFlag{ + Name: "quiet, q", + Usage: "display only container IDs", + }, + }, + Before: appargs.Validate(), + Action: func(context *cli.Context) error { + s, err := getContainers(context) + if err != nil { + return err + } + + if context.Bool("quiet") { + for _, item := range s { + fmt.Println(item.ID) + } + return nil + } + + switch context.String("format") { + case "table": + w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0) + fmt.Fprint(w, "ID\tPID\tSTATUS\tBUNDLE\tCREATED\tOWNER\n") + for _, item := range s { + fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n", + item.ID, + item.InitProcessPid, + item.Status, + item.Bundle, + item.Created.Format(time.RFC3339Nano), + item.Owner) + } + if err := w.Flush(); err != nil { + return err + } + case "json": + if err := json.NewEncoder(os.Stdout).Encode(s); err != nil { + return err + } + default: + return fmt.Errorf("invalid format option") + } + return nil + }, +} + +func getContainers(context *cli.Context) ([]runhcs.ContainerState, error) { + ids, err := stateKey.Enumerate() + if err != nil { + return nil, err + } + + var s []runhcs.ContainerState + for _, id := range ids { + c, err := getContainer(id, false) + if err != nil { + fmt.Fprintf(os.Stderr, "reading state for %s: %v\n", id, err) + continue + } + status, err := c.Status() + if err != nil { + fmt.Fprintf(os.Stderr, "reading status for %s: %v\n", id, err) + } + + s = append(s, runhcs.ContainerState{ + ID: id, + Version: c.Spec.Version, + InitProcessPid: c.ShimPid, + Status: string(status), + Bundle: c.Bundle, + Rootfs: c.Rootfs, + Created: c.Created, + Annotations: c.Spec.Annotations, + }) + c.Close() + } + return s, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/main.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/main.go new file mode 100644 index 00000000..e13dbc09 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/main.go @@ -0,0 +1,174 @@ +package main + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/pkg/etwlogrus" + "github.com/Microsoft/hcsshim/internal/regstate" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +// Add a manifest to get proper Windows version detection. +// +// goversioninfo can be installed with "go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo" + +//go:generate goversioninfo -platform-specific + +// version will be populated by the Makefile, read from +// VERSION file of the source code. +var version = "" + +// gitCommit will be the hash that the binary was built from +// and will be populated by the Makefile +var gitCommit = "" + +var stateKey *regstate.Key + +var logFormat string + +const ( + specConfig = "config.json" + usage = `Open Container Initiative runtime for Windows + +runhcs is a fork of runc, modified to run containers on Windows with or without Hyper-V isolation. Like runc, it is a command line client for running applications packaged according to the Open Container Initiative (OCI) format. 
+ +runhcs integrates with existing process supervisors to provide a production container runtime environment for applications. It can be used with your existing process monitoring tools and the container will be spawned as a direct child of the process supervisor. + +Containers are configured using bundles. A bundle for a container is a directory that includes a specification file named "` + specConfig + `". Bundle contents will depend on the container type. + +To start a new instance of a container: + + # runhcs run [ -b bundle ] + +Where "" is your name for the instance of the container that you are starting. The name you provide for the container instance must be unique on your host. Providing the bundle directory using "-b" is optional. The default value for "bundle" is the current directory.` +) + +func main() { + // Provider ID: 0b52781f-b24d-5685-ddf6-69830ed40ec3 + // Hook isn't closed explicitly, as it will exist until process exit. + if hook, err := etwlogrus.NewHook("Microsoft.Virtualization.RunHCS"); err == nil { + logrus.AddHook(hook) + } else { + logrus.Error(err) + } + + app := cli.NewApp() + app.Name = "runhcs" + app.Usage = usage + + var v []string + if version != "" { + v = append(v, version) + } + if gitCommit != "" { + v = append(v, fmt.Sprintf("commit: %s", gitCommit)) + } + v = append(v, fmt.Sprintf("spec: %s", specs.Version)) + app.Version = strings.Join(v, "\n") + + app.Flags = []cli.Flag{ + cli.BoolFlag{ + Name: "debug", + Usage: "enable debug output for logging", + }, + cli.StringFlag{ + Name: "log", + Value: "nul", + Usage: `set the log file path or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-log) where internal debug information is written`, + }, + cli.StringFlag{ + Name: "log-format", + Value: "text", + Usage: "set the format used by logs ('text' (default), or 'json')", + }, + cli.StringFlag{ + Name: "owner", + Value: "runhcs", + Usage: "compute system owner", + }, + cli.StringFlag{ + Name: "root", + Value: "default", + Usage: "registry key for storage of container state", + }, + } + app.Commands = []cli.Command{ + createCommand, + createScratchCommand, + deleteCommand, + // eventsCommand, + execCommand, + killCommand, + listCommand, + pauseCommand, + psCommand, + resizeTtyCommand, + resumeCommand, + runCommand, + shimCommand, + startCommand, + stateCommand, + // updateCommand, + vmshimCommand, + } + app.Before = func(context *cli.Context) error { + if context.GlobalBool("debug") { + logrus.SetLevel(logrus.DebugLevel) + } + if path := context.GlobalString("log"); path != "" { + var f io.Writer + var err error + if strings.HasPrefix(path, runhcs.SafePipePrefix) { + f, err = winio.DialPipe(path, nil) + } else { + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666) + } + if err != nil { + return err + } + logrus.SetOutput(f) + } + switch logFormat = context.GlobalString("log-format"); logFormat { + case "text": + // retain logrus's default. + case "json": + logrus.SetFormatter(new(logrus.JSONFormatter)) + default: + return fmt.Errorf("unknown log-format %q", logFormat) + } + + var err error + stateKey, err = regstate.Open(context.GlobalString("root"), false) + if err != nil { + return err + } + return nil + } + // If the command returns an error, cli takes upon itself to print + // the error on cli.ErrWriter and exit. + // Use our own writer here to ensure the log gets sent to the right location. 
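Put together, the global flags above give a typical debugging invocation like the one below (the pipe name mirrors the example in the --log flag help; the --root value is illustrative; the fatalWriter wiring just after this ensures errors printed by cli reach the same log):

    # runhcs --debug --log \\.\pipe\ProtectedPrefix\Administrators\runhcs-log --log-format json --root test list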
+ fatalWriter.Writer = cli.ErrWriter + cli.ErrWriter = &fatalWriter + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(cli.ErrWriter, err) + os.Exit(1) + } +} + +type logErrorWriter struct { + Writer io.Writer +} + +var fatalWriter logErrorWriter + +func (f *logErrorWriter) Write(p []byte) (n int, err error) { + logrus.Error(string(p)) + return f.Writer.Write(p) +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/pause.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/pause.go new file mode 100644 index 00000000..f9b36541 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/pause.go @@ -0,0 +1,58 @@ +package main + +import ( + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/urfave/cli" +) + +var pauseCommand = cli.Command{ + Name: "pause", + Usage: "pause suspends all processes inside the container", + ArgsUsage: ` + +Where "" is the name for the instance of the container to be +paused. `, + Description: `The pause command suspends all processes in the instance of the container. + +Use runhcs list to identify instances of containers and their current status.`, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + id := context.Args().First() + container, err := getContainer(id, true) + if err != nil { + return err + } + defer container.Close() + if err := container.hc.Pause(); err != nil { + return err + } + + return nil + }, +} + +var resumeCommand = cli.Command{ + Name: "resume", + Usage: "resumes all processes that have been previously paused", + ArgsUsage: ` + +Where "" is the name for the instance of the container to be +resumed.`, + Description: `The resume command resumes all processes in the instance of the container. + +Use runhcs list to identify instances of containers and their current status.`, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + id := context.Args().First() + container, err := getContainer(id, true) + if err != nil { + return err + } + defer container.Close() + if err := container.hc.Resume(); err != nil { + return err + } + + return nil + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/ps.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/ps.go new file mode 100644 index 00000000..2f8bf1fb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/ps.go @@ -0,0 +1,51 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/urfave/cli" +) + +var psCommand = cli.Command{ + Name: "ps", + Usage: "ps displays the processes running inside a container", + ArgsUsage: ` [ps options]`, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "format, f", + Value: "json", + Usage: `select one of: ` + formatOptions, + }, + }, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + id := context.Args().First() + container, err := getContainer(id, true) + if err != nil { + return err + } + defer container.Close() + + props, err := container.hc.Properties(schema1.PropertyTypeProcessList) + if err != nil { + return err + } + + var pids []int + for _, p := range props.ProcessList { + pids = append(pids, int(p.ProcessId)) + } + + switch context.String("format") { + case "json": + return json.NewEncoder(os.Stdout).Encode(pids) + default: + return fmt.Errorf("invalid format option") + } + }, + SkipArgReorder: true, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/resource_windows_386.syso 
b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/resource_windows_386.syso new file mode 100644 index 0000000000000000000000000000000000000000..b4320575e4c5c95bfe35813298ae82a61e87fa35 GIT binary patch literal 968 zcma)5!HU#C5UuPE$O?joJ$P(`*Pcmq1~W@$M?@C}#9iEVJW40&>@+0ZA)OgrMA%R8 z3&bxF5wCuee;~dj6UaIqtfacCt6sgTbdnd2L$!S0Cu+TcGc0WxH_>*9kPG2{hOK1} z`lPw*T_<~V7cL{Zz6W~-3)2&);Sc^1KN#!;QP<*EJ1K{1{3js3Ko8rY;Sv3S|FyS2 zbjFA->73qTeNM07E$Eb9V!yz80>74@gI~Yfr|*heJ8!Z?16*BZ8=?x11HOt%Ql#k; zY>G0HVAhn9K^|uSj7MYWw6RaI5(RXC_!%kewVoU(Vdu7biS$ znXQCQL}s*{xmVJ<^W!_WLt(AVqwI=-K--zSF7=U(S27ot=PE9ZHEG3TlOGA24>!Kc za-mf!ZME!o0~LdWiL`O4iV9;KmutO>Z8+-3wlMG&qEeB{RP`UNZ@}vzw(Xjhq9`&I zgAFZP`Z`=oc%O6jM(M=7v&{Dx=YIq2x+qLp$>j8$Es~l0A&_4BSojZlltz#EIF$j9 zreea!L7GfF;ZMX=d~`=Wo^Z4~KsdTFG0;G54;q_myw6&0HQ#&d_5aTLtK4ti`X87o Bw2uG) literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/resource_windows_amd64.syso b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/resource_windows_amd64.syso new file mode 100644 index 0000000000000000000000000000000000000000..d6100a85b94087599ac086568b810e2fba5e51c1 GIT binary patch literal 968 zcma)5!EVz)5FIzU6bXq#4;&YZ>n1TI!l|65LbOsYw4x;((b`@&E7`l2*9lapQcs-u z0>l>}gt+oe`~l+GjwP3HVC>nMoq6+Sytd*-|z1PQP<#CJ1K`M{3js3Ko6Rs;Sv3S|FyF} zaK?x(>4M&3J)u|d7Ia20v0q?4ggKbXJIQnrzWRYuQTZ!mJ1)RBpmzM8qOFHgDe zGFu8Ai_~a2bFZa!=STPN1;SdHh3Pc|fwnVuUFbs_tz;%F&s0ZDHWcMX5rSs`5Wt-+@;{Y}?f>MV_ZB z0vlR3^i{Z$;1TESjnc7sXPGx*oc|55>pV9_DdV#Xwuoo$2VahoXTp2J!z6sl$BFcL zI298<_LF!r65d2i#f>}c@C3u%0fOP3iGc=cd(hb2;(b=qtH1Zg>;9e9SJ|uI`XA_n BwIl!l literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/run.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/run.go new file mode 100644 index 00000000..04cf868c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/run.go @@ -0,0 +1,64 @@ +package main + +import ( + "os" + "syscall" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/urfave/cli" +) + +// default action is to start a container +var runCommand = cli.Command{ + Name: "run", + Usage: "create and run a container", + ArgsUsage: ` + +Where "" is your name for the instance of the container that you +are starting. The name you provide for the container instance must be unique on +your host.`, + Description: `The run command creates an instance of a container for a bundle. The bundle +is a directory with a specification file named "` + specConfig + `" and a root +filesystem. + +The specification file includes an args parameter. The args parameter is used +to specify command(s) that get run when the container is started. 
To change the +command(s) that get executed on start, edit the args parameter of the spec.`, + Flags: append(createRunFlags, + cli.BoolFlag{ + Name: "detach, d", + Usage: "detach from the container's process", + }, + ), + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + cfg, err := containerConfigFromContext(context) + if err != nil { + return err + } + c, err := createContainer(cfg) + if err != nil { + return err + } + if err != nil { + return err + } + p, err := os.FindProcess(c.ShimPid) + if err != nil { + return err + } + err = c.Exec() + if err != nil { + return err + } + if !context.Bool("detach") { + state, err := p.Wait() + if err != nil { + return err + } + c.Remove() + os.Exit(int(state.Sys().(syscall.WaitStatus).ExitCode)) + } + return nil + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/runhcs.exe.manifest b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/runhcs.exe.manifest new file mode 100644 index 00000000..0eee5a83 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/runhcs.exe.manifest @@ -0,0 +1,10 @@ + + + runhcs + + + + + + + diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/shim.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/shim.go new file mode 100644 index 00000000..a31913f1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/shim.go @@ -0,0 +1,323 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strings" + "sync" + "time" + + winio "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/lcow" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/Microsoft/hcsshim/internal/schema2" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" + "golang.org/x/sys/windows" +) + +func containerPipePath(id string) string { + return runhcs.SafePipePath("runhcs-shim-" + id) +} + +func newFile(context *cli.Context, param string) *os.File { + fd := uintptr(context.Int(param)) + if fd == 0 { + return nil + } + return os.NewFile(fd, "") +} + +var shimCommand = cli.Command{ + Name: "shim", + Usage: `launch the process and proxy stdio (do not call it outside of runhcs)`, + Hidden: true, + Flags: []cli.Flag{ + &cli.IntFlag{Name: "stdin", Hidden: true}, + &cli.IntFlag{Name: "stdout", Hidden: true}, + &cli.IntFlag{Name: "stderr", Hidden: true}, + &cli.BoolFlag{Name: "exec", Hidden: true}, + cli.StringFlag{Name: "log-pipe", Hidden: true}, + }, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + logPipe := context.String("log-pipe") + if logPipe != "" { + lpc, err := winio.DialPipe(logPipe, nil) + if err != nil { + return err + } + defer lpc.Close() + logrus.SetOutput(lpc) + } else { + logrus.SetOutput(os.Stderr) + } + fatalWriter.Writer = os.Stdout + + id := context.Args().First() + c, err := getContainer(id, true) + if err != nil { + return err + } + defer c.Close() + + // Asynchronously wait for the container to exit. + containerExitCh := make(chan error) + go func() { + containerExitCh <- c.hc.WaitExpectedError(hcs.ErrAlreadyClosed) + }() + + // Get File objects for the open stdio files passed in as arguments. 
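// Editorial sketch (not part of the upstream patch): the hidden --stdin,
// --stdout and --stderr flags above carry numeric handle values that the
// parent process is expected to have made inheritable before spawning this
// shim; newFile re-adopts them with os.NewFile. A minimal, standalone version
// of that re-adoption step; the flag name is illustrative, not the shim's
// real interface. Assumes imports "flag" and "os".
func exampleAdoptInheritedHandle() *os.File {
	fd := flag.Int("stdout", 0, "inherited stdout handle value")
	flag.Parse()
	if *fd == 0 {
		return nil // nothing was passed down
	}
	// os.NewFile wraps an already-open descriptor; the name is purely cosmetic.
	return os.NewFile(uintptr(*fd), "inherited-stdout")
}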
+ stdin := newFile(context, "stdin") + stdout := newFile(context, "stdout") + stderr := newFile(context, "stderr") + + exec := context.Bool("exec") + terminateOnFailure := false + + errorOut := io.WriteCloser(os.Stdout) + + var spec *specs.Process + + if exec { + // Read the process spec from stdin. + specj, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + os.Stdin.Close() + + spec = new(specs.Process) + err = json.Unmarshal(specj, spec) + if err != nil { + return err + } + + } else { + // Stdin is not used. + os.Stdin.Close() + + // Listen on the named pipe associated with this container. + l, err := winio.ListenPipe(c.ShimPipePath(), nil) + if err != nil { + return err + } + + // Alert the parent process that initialization has completed + // successfully. + errorOut.Write(runhcs.ShimSuccess) + errorOut.Close() + fatalWriter.Writer = ioutil.Discard + + // When this process exits, clear this process's pid in the registry. + defer func() { + stateKey.Set(id, keyShimPid, 0) + }() + + defer func() { + if terminateOnFailure { + if err = c.hc.Terminate(); hcs.IsPending(err) { + <-containerExitCh + } + } + }() + terminateOnFailure = true + + // Wait for a connection to the named pipe, exiting if the container + // exits before this happens. + var pipe net.Conn + pipeCh := make(chan error) + go func() { + var err error + pipe, err = l.Accept() + pipeCh <- err + }() + + select { + case err = <-pipeCh: + if err != nil { + return err + } + case err = <-containerExitCh: + if err != nil { + return err + } + return cli.NewExitError("", 1) + } + + // The next set of errors goes to the open pipe connection. + errorOut = pipe + fatalWriter.Writer = pipe + + // The process spec comes from the original container spec. + spec = c.Spec.Process + } + + // Create the process in the container. 
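// Editorial sketch (not part of the upstream patch): the block below turns an
// OCI process spec into HCS process parameters. Two details worth isolating
// are the KEY=VALUE environment split and the Windows command-line quoting
// done with windows.EscapeArg. A minimal version of both, assuming imports
// "strings" and "golang.org/x/sys/windows".
func ociEnvAndCommandLine(env, args []string) (map[string]string, string) {
	environment := make(map[string]string)
	for _, v := range env {
		// Split only on the first '=' so values may themselves contain '='.
		if kv := strings.SplitN(v, "=", 2); len(kv) == 2 && kv[1] != "" {
			environment[kv[0]] = kv[1]
		}
	}
	var cmdLine string
	for i, arg := range args {
		// EscapeArg applies Windows argument-quoting rules to a single argument.
		e := windows.EscapeArg(arg)
		if i == 0 {
			cmdLine = e
		} else {
			cmdLine += " " + e
		}
	}
	return environment, cmdLine
}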
+ var wpp *hcsschema.ProcessParameters // Windows Process Parameters + var lpp *lcow.ProcessParameters // Linux Process Parameters + + var p *hcs.Process + + if c.Spec.Linux == nil { + environment := make(map[string]string) + for _, v := range spec.Env { + s := strings.SplitN(v, "=", 2) + if len(s) == 2 && len(s[1]) > 0 { + environment[s[0]] = s[1] + } + } + wpp = &hcsschema.ProcessParameters{ + WorkingDirectory: spec.Cwd, + EmulateConsole: spec.Terminal, + Environment: environment, + User: spec.User.Username, + } + for i, arg := range spec.Args { + e := windows.EscapeArg(arg) + if i == 0 { + wpp.CommandLine = e + } else { + wpp.CommandLine += " " + e + } + } + if spec.ConsoleSize != nil { + wpp.ConsoleSize = []int32{ + int32(spec.ConsoleSize.Height), + int32(spec.ConsoleSize.Width), + } + } + + wpp.CreateStdInPipe = stdin != nil + wpp.CreateStdOutPipe = stdout != nil + wpp.CreateStdErrPipe = stderr != nil + + p, err = c.hc.CreateProcess(wpp) + + } else { + lpp = &lcow.ProcessParameters{} + if exec { + lpp.OCIProcess = spec + } + + lpp.CreateStdInPipe = stdin != nil + lpp.CreateStdOutPipe = stdout != nil + lpp.CreateStdErrPipe = stderr != nil + + p, err = c.hc.CreateProcess(lpp) + } + + if err != nil { + return err + } + + cstdin, cstdout, cstderr, err := p.Stdio() + if err != nil { + return err + } + + if !exec { + err = stateKey.Set(c.ID, keyInitPid, p.Pid()) + if err != nil { + return err + } + } + + // Store the Guest pid map + err = stateKey.Set(c.ID, fmt.Sprintf(keyPidMapFmt, os.Getpid()), p.Pid()) + if err != nil { + return err + } + defer func() { + // Remove the Guest pid map when this process is cleaned up + stateKey.Clear(c.ID, fmt.Sprintf(keyPidMapFmt, os.Getpid())) + }() + + terminateOnFailure = false + + // Alert the connected process that the process was launched + // successfully. + errorOut.Write(runhcs.ShimSuccess) + errorOut.Close() + fatalWriter.Writer = ioutil.Discard + + // Relay stdio. + var wg sync.WaitGroup + if cstdin != nil { + go func() { + io.Copy(cstdin, stdin) + cstdin.Close() + p.CloseStdin() + }() + } + + if cstdout != nil { + wg.Add(1) + go func() { + io.Copy(stdout, cstdout) + stdout.Close() + cstdout.Close() + wg.Done() + }() + } + + if cstderr != nil { + wg.Add(1) + go func() { + io.Copy(stderr, cstderr) + stderr.Close() + cstderr.Close() + wg.Done() + }() + } + + err = p.Wait() + wg.Wait() + + // Attempt to get the exit code from the process. + code := 1 + if err == nil { + code, err = p.ExitCode() + if err != nil { + code = 1 + } + } + + if !exec { + // Shutdown the container, waiting 5 minutes before terminating is + // forcefully. 
+ const shutdownTimeout = time.Minute * 5 + waited := false + err = c.hc.Shutdown() + if hcs.IsPending(err) { + select { + case err = <-containerExitCh: + waited = true + case <-time.After(shutdownTimeout): + err = hcs.ErrTimeout + } + } + if hcs.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + err = c.hc.Terminate() + if waited { + err = c.hc.Wait() + } else { + err = <-containerExitCh + } + } + } + + return cli.NewExitError("", code) + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/signalmap.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/signalmap.go new file mode 100644 index 00000000..edc73e01 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/signalmap.go @@ -0,0 +1,46 @@ +package main + +var signalMapLcow = map[string]int{ + "ABRT": 0x6, + "ALRM": 0xe, + "BUS": 0x7, + "CHLD": 0x11, + "CLD": 0x11, + "CONT": 0x12, + "FPE": 0x8, + "HUP": 0x1, + "ILL": 0x4, + "INT": 0x2, + "IO": 0x1d, + "IOT": 0x6, + "KILL": 0x9, + "PIPE": 0xd, + "POLL": 0x1d, + "PROF": 0x1b, + "PWR": 0x1e, + "QUIT": 0x3, + "SEGV": 0xb, + "STKFLT": 0x10, + "STOP": 0x13, + "SYS": 0x1f, + "TERM": 0xf, + "TRAP": 0x5, + "TSTP": 0x14, + "TTIN": 0x15, + "TTOU": 0x16, + "URG": 0x17, + "USR1": 0xa, + "USR2": 0xc, + "VTALRM": 0x1a, + "WINCH": 0x1c, + "XCPU": 0x18, + "XFSZ": 0x19, +} + +var signalMapWindows = map[string]int{ + "CTRLC": 0x0, + "CTRLBREAK": 0x1, + "CTRLCLOSE": 0x2, + "CTRLLOGOFF": 0x5, + "CTRLSHUTDOWN": 0x6, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/spec.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/spec.go new file mode 100644 index 00000000..005afdf4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/spec.go @@ -0,0 +1,42 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/urfave/cli" +) + +// loadSpec loads the specification from the provided path. +func loadSpec(cPath string) (spec *specs.Spec, err error) { + cf, err := os.Open(cPath) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("JSON specification file %s not found", cPath) + } + return nil, err + } + defer cf.Close() + + if err = json.NewDecoder(cf).Decode(&spec); err != nil { + return nil, err + } + return spec, nil +} + +// setupSpec performs initial setup based on the cli.Context for the container +func setupSpec(context *cli.Context) (*specs.Spec, error) { + bundle := context.String("bundle") + if bundle != "" { + if err := os.Chdir(bundle); err != nil { + return nil, err + } + } + spec, err := loadSpec(specConfig) + if err != nil { + return nil, err + } + return spec, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/start.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/start.go new file mode 100644 index 00000000..d5d004cd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/start.go @@ -0,0 +1,43 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/urfave/cli" +) + +var startCommand = cli.Command{ + Name: "start", + Usage: "executes the user defined process in a created container", + ArgsUsage: ` + +Where "" is your name for the instance of the container that you +are starting. 
The name you provide for the container instance must be unique on +your host.`, + Description: `The start command executes the user defined process in a created container.`, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + id := context.Args().First() + container, err := getContainer(id, false) + if err != nil { + return err + } + defer container.Close() + status, err := container.Status() + if err != nil { + return err + } + switch status { + case containerCreated: + return container.Exec() + case containerStopped: + return errors.New("cannot start a container that has stopped") + case containerRunning: + return errors.New("cannot start an already running container") + default: + return fmt.Errorf("cannot start a container in the '%s' state", status) + } + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/state.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/state.go new file mode 100644 index 00000000..bae1c3de --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/state.go @@ -0,0 +1,49 @@ +package main + +import ( + "encoding/json" + "os" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/urfave/cli" +) + +var stateCommand = cli.Command{ + Name: "state", + Usage: "output the state of a container", + ArgsUsage: ` + +Where "" is your name for the instance of the container.`, + Description: `The state command outputs current state information for the +instance of a container.`, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + id := context.Args().First() + c, err := getContainer(id, false) + if err != nil { + return err + } + defer c.Close() + status, err := c.Status() + if err != nil { + return err + } + cs := runhcs.ContainerState{ + Version: c.Spec.Version, + ID: c.ID, + InitProcessPid: c.ShimPid, + Status: string(status), + Bundle: c.Bundle, + Rootfs: c.Rootfs, + Created: c.Created, + Annotations: c.Spec.Annotations, + } + data, err := json.MarshalIndent(cs, "", " ") + if err != nil { + return err + } + os.Stdout.Write(data) + return nil + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/tty.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/tty.go new file mode 100644 index 00000000..0b9e43e4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/tty.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "strconv" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/urfave/cli" +) + +var resizeTtyCommand = cli.Command{ + Name: "resize-tty", + Usage: "resize-tty updates the terminal size for a container process", + ArgsUsage: ` `, + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "pid, p", + Usage: "the process pid (defaults to init pid)", + }, + }, + Before: appargs.Validate( + argID, + appargs.Int(10, 1, 65535), + appargs.Int(10, 1, 65535), + ), + Action: func(context *cli.Context) error { + id := context.Args()[0] + width, _ := strconv.ParseUint(context.Args()[1], 10, 16) + height, _ := strconv.ParseUint(context.Args()[2], 10, 16) + c, err := getContainer(id, true) + if err != nil { + return err + } + defer c.Close() + + pid := context.Int("pid") + if pid == 0 { + if err := stateKey.Get(id, keyInitPid, &pid); err != nil { + return err + } + } else { + // If a pid was provided map it to its hcs pid. 
+ if err := stateKey.Get(id, fmt.Sprintf(keyPidMapFmt, pid), &pid); err != nil { + return err + } + } + + p, err := c.hc.OpenProcess(pid) + if err != nil { + return err + } + defer p.Close() + + return p.ResizeConsole(uint16(width), uint16(height)) + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils.go new file mode 100644 index 00000000..846dd733 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils.go @@ -0,0 +1,52 @@ +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "strings" + + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/runhcs" +) + +var argID = appargs.NonEmptyString + +func absPathOrEmpty(path string) (string, error) { + if path == "" { + return "", nil + } + if strings.HasPrefix(path, runhcs.SafePipePrefix) { + if len(path) > len(runhcs.SafePipePrefix) { + return runhcs.SafePipePath(path[len(runhcs.SafePipePrefix):]), nil + } + } + return filepath.Abs(path) +} + +// createPidFile creates a file with the processes pid inside it atomically +// it creates a temp file with the paths filename + '.' infront of it +// then renames the file +func createPidFile(path string, pid int) error { + var ( + tmpDir = filepath.Dir(path) + tmpName = filepath.Join(tmpDir, fmt.Sprintf(".%s", filepath.Base(path))) + ) + f, err := os.OpenFile(tmpName, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) + if err != nil { + return err + } + _, err = fmt.Fprintf(f, "%d", pid) + f.Close() + if err != nil { + return err + } + return os.Rename(tmpName, path) +} + +func closeWritePipe(pipe net.Conn) error { + return pipe.(interface { + CloseWrite() error + }).CloseWrite() +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils_test.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils_test.go new file mode 100644 index 00000000..8fbebcda --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/utils_test.go @@ -0,0 +1,39 @@ +package main + +import ( + "os" + "testing" + + "github.com/Microsoft/hcsshim/internal/runhcs" +) + +func Test_AbsPathOrEmpty(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get test wd: %v", err) + } + + tests := []string{ + "", + runhcs.SafePipePrefix + "test", + runhcs.SafePipePrefix + "test with spaces", + "test", + "C:\\test..\\test", + } + expected := []string{ + "", + runhcs.SafePipePrefix + "test", + runhcs.SafePipePrefix + "test%20with%20spaces", + wd + "\\test", + "C:\\test..\\test", + } + for i, test := range tests { + actual, err := absPathOrEmpty(test) + if err != nil { + t.Fatalf("absPathOrEmpty: error '%v'", err) + } + if actual != expected[i] { + t.Fatalf("absPathOrEmpty: actual '%s' != '%s'", actual, expected[i]) + } + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/versioninfo.json b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/versioninfo.json new file mode 100644 index 00000000..a9394a40 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/versioninfo.json @@ -0,0 +1,43 @@ +{ + "FixedFileInfo": { + "FileVersion": { + "Major": 1, + "Minor": 0, + "Patch": 0, + "Build": 0 + }, + "ProductVersion": { + "Major": 1, + "Minor": 0, + "Patch": 0, + "Build": 0 + }, + "FileFlagsMask": "3f", + "FileFlags ": "00", + "FileOS": "040004", + "FileType": "01", + "FileSubType": "00" + }, + "StringFileInfo": { + "Comments": "", + "CompanyName": "", + "FileDescription": "", + "FileVersion": "", + "InternalName": "", + "LegalCopyright": "", + 
"LegalTrademarks": "", + "OriginalFilename": "", + "PrivateBuild": "", + "ProductName": "", + "ProductVersion": "v1.0.0.0", + "SpecialBuild": "" + }, + "VarFileInfo": { + "Translation": { + "LangID": "0409", + "CharsetID": "04B0" + } + }, + "IconPath": "", + "ManifestPath": "runhcs.exe.manifest" +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/vm.go b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/vm.go new file mode 100644 index 00000000..582a6009 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/runhcs/vm.go @@ -0,0 +1,209 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "syscall" + + winio "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +func vmID(id string) string { + return id + "@vm" +} + +var vmshimCommand = cli.Command{ + Name: "vmshim", + Usage: `launch a VM and containers inside it (do not call it outside of runhcs)`, + Hidden: true, + Flags: []cli.Flag{ + cli.StringFlag{Name: "log-pipe", Hidden: true}, + cli.StringFlag{Name: "os", Hidden: true}, + }, + Before: appargs.Validate(argID), + Action: func(context *cli.Context) error { + logPipe := context.String("log-pipe") + if logPipe != "" { + lpc, err := winio.DialPipe(logPipe, nil) + if err != nil { + return err + } + defer lpc.Close() + logrus.SetOutput(lpc) + } else { + logrus.SetOutput(os.Stderr) + } + fatalWriter.Writer = os.Stdout + + pipePath := context.Args().First() + + optsj, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + os.Stdin.Close() + + var opts interface{} + isLCOW := context.String("os") == "linux" + if isLCOW { + opts = &uvm.OptionsLCOW{} + } else { + opts = &uvm.OptionsWCOW{} + } + + err = json.Unmarshal(optsj, opts) + if err != nil { + return err + } + + // Listen on the named pipe associated with this VM. + l, err := winio.ListenPipe(pipePath, &winio.PipeConfig{MessageMode: true}) + if err != nil { + return err + } + + var vm *uvm.UtilityVM + if isLCOW { + vm, err = uvm.CreateLCOW(opts.(*uvm.OptionsLCOW)) + } else { + vm, err = uvm.CreateWCOW(opts.(*uvm.OptionsWCOW)) + } + if err != nil { + return err + } + defer vm.Close() + if err = vm.Start(); err != nil { + return err + } + + // Asynchronously wait for the VM to exit. + exitCh := make(chan error) + go func() { + exitCh <- vm.Wait() + }() + + defer vm.Terminate() + + // Alert the parent process that initialization has completed + // successfully. + os.Stdout.Write(runhcs.ShimSuccess) + os.Stdout.Close() + fatalWriter.Writer = ioutil.Discard + + pipeCh := make(chan net.Conn) + go func() { + for { + conn, err := l.Accept() + if err != nil { + logrus.Error(err) + continue + } + pipeCh <- conn + } + }() + + for { + select { + case <-exitCh: + return nil + case pipe := <-pipeCh: + err = processRequest(vm, pipe) + if err == nil { + _, err = pipe.Write(runhcs.ShimSuccess) + // Wait until the pipe is closed before closing the + // container so that it is properly handed off to the other + // process. + if err == nil { + err = closeWritePipe(pipe) + } + if err == nil { + ioutil.ReadAll(pipe) + } + } else { + logrus.WithError(err). 
+ Error("failed creating container in VM") + fmt.Fprintf(pipe, "%v", err) + } + pipe.Close() + } + } + }, +} + +func processRequest(vm *uvm.UtilityVM, pipe net.Conn) error { + var req runhcs.VMRequest + err := json.NewDecoder(pipe).Decode(&req) + if err != nil { + return err + } + logrus.WithFields(logrus.Fields{ + logfields.ContainerID: req.ID, + logfields.VMShimOperation: req.Op, + }).Debug("process request") + c, err := getContainer(req.ID, false) + if err != nil { + return err + } + defer func() { + if c != nil { + c.Close() + } + }() + switch req.Op { + case runhcs.OpCreateContainer: + err = createContainerInHost(c, vm) + if err != nil { + return err + } + c2 := c + c = nil + go func() { + c2.hc.Wait() + c2.Close() + }() + + case runhcs.OpUnmountContainer, runhcs.OpUnmountContainerDiskOnly: + err = c.unmountInHost(vm, req.Op == runhcs.OpUnmountContainer) + if err != nil { + return err + } + + case runhcs.OpSyncNamespace: + return errors.New("Not implemented") + default: + panic("unknown operation") + } + return nil +} + +type noVMError struct { + ID string +} + +func (err *noVMError) Error() string { + return "VM " + err.ID + " cannot be contacted" +} + +func (c *container) issueVMRequest(op runhcs.VMRequestOp) error { + req := runhcs.VMRequest{ + ID: c.ID, + Op: op, + } + if err := runhcs.IssueVMRequest(c.VMPipePath(), &req); err != nil { + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { + return &noVMError{c.HostID} + } + return err + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/tar2ext4/tar2ext4.go b/vendor/github.com/Microsoft/hcsshim/cmd/tar2ext4/tar2ext4.go new file mode 100644 index 00000000..9f298d2a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/tar2ext4/tar2ext4.go @@ -0,0 +1,64 @@ +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/Microsoft/hcsshim/ext4/tar2ext4" +) + +var ( + input = flag.String("i", "", "input file") + output = flag.String("o", "", "output file") + overlay = flag.Bool("overlay", false, "produce overlayfs-compatible layer image") + vhd = flag.Bool("vhd", false, "add a VHD footer to the end of the image") + inlineData = flag.Bool("inline", false, "write small file data into the inode; not compatible with DAX") +) + +func main() { + flag.Parse() + if flag.NArg() != 0 || len(*output) == 0 { + flag.Usage() + os.Exit(1) + } + + err := func() (err error) { + in := os.Stdin + if *input != "" { + in, err = os.Open(*input) + if err != nil { + return err + } + } + out, err := os.Create(*output) + if err != nil { + return err + } + + var opts []tar2ext4.Option + if *overlay { + opts = append(opts, tar2ext4.ConvertWhiteout) + } + if *vhd { + opts = append(opts, tar2ext4.AppendVhdFooter) + } + if *inlineData { + opts = append(opts, tar2ext4.InlineData) + } + err = tar2ext4.Convert(in, out, opts...) + if err != nil { + return err + } + + // Exhaust the tar stream. 
+ io.Copy(ioutil.Discard, in) + return nil + }() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/create.go b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/create.go new file mode 100644 index 00000000..5a48cbc3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/create.go @@ -0,0 +1,36 @@ +package main + +import ( + "path/filepath" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/urfave/cli" +) + +var createCommand = cli.Command{ + Name: "create", + Usage: "creates a new writable container layer", + Flags: []cli.Flag{ + cli.StringSliceFlag{ + Name: "layer, l", + Usage: "paths to the read-only parent layers", + }, + }, + ArgsUsage: "", + Before: appargs.Validate(appargs.NonEmptyString), + Action: func(context *cli.Context) error { + path, err := filepath.Abs(context.Args().First()) + if err != nil { + return err + } + + layers, err := normalizeLayers(context.StringSlice("layer"), true) + if err != nil { + return err + } + + di := driverInfo + return hcsshim.CreateScratchLayer(di, path, layers[len(layers)-1], layers) + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/export.go b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/export.go new file mode 100644 index 00000000..be885f47 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/export.go @@ -0,0 +1,66 @@ +package main + +import ( + "compress/gzip" + "io" + "os" + "path/filepath" + + winio "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/ociwclayer" + "github.com/urfave/cli" +) + +var exportCommand = cli.Command{ + Name: "export", + Usage: "exports a layer to a tar file", + Flags: []cli.Flag{ + cli.StringSliceFlag{ + Name: "layer, l", + Usage: "paths to the read-only parent layers", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "output layer tar (defaults to stdout)", + }, + cli.BoolFlag{ + Name: "gzip, z", + Usage: "compress output with gzip compression", + }, + }, + ArgsUsage: "", + Before: appargs.Validate(appargs.NonEmptyString), + Action: func(context *cli.Context) (err error) { + path, err := filepath.Abs(context.Args().First()) + if err != nil { + return err + } + + layers, err := normalizeLayers(context.StringSlice("layer"), true) + if err != nil { + return err + } + + err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege}) + if err != nil { + return err + } + + fp := context.String("output") + f := os.Stdout + if fp != "" { + f, err = os.Create(fp) + if err != nil { + return err + } + defer f.Close() + } + w := io.Writer(f) + if context.Bool("gzip") { + w = gzip.NewWriter(w) + } + + return ociwclayer.ExportLayer(w, path, layers) + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/import.go b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/import.go new file mode 100644 index 00000000..470deb71 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/import.go @@ -0,0 +1,74 @@ +package main + +import ( + "bufio" + "compress/gzip" + "io" + "os" + "path/filepath" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/Microsoft/hcsshim/internal/ociwclayer" + "github.com/urfave/cli" +) + +var importCommand = cli.Command{ + Name: "import", + Usage: "imports a layer from a tar file", + Flags: []cli.Flag{ + cli.StringSliceFlag{ + Name: "layer, l", + Usage: "paths to the read-only parent 
layers", + }, + cli.StringFlag{ + Name: "input, i", + Usage: "input layer tar (defaults to stdin)", + }, + }, + ArgsUsage: "", + Before: appargs.Validate(appargs.NonEmptyString), + Action: func(context *cli.Context) (err error) { + path, err := filepath.Abs(context.Args().First()) + if err != nil { + return err + } + + layers, err := normalizeLayers(context.StringSlice("layer"), false) + if err != nil { + return err + } + + fp := context.String("input") + f := os.Stdin + if fp != "" { + f, err = os.Open(fp) + if err != nil { + return err + } + defer f.Close() + } + r, err := addDecompressor(f) + if err != nil { + return err + } + err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return err + } + _, err = ociwclayer.ImportLayer(r, path, layers) + return err + }, +} + +func addDecompressor(r io.Reader) (io.Reader, error) { + b := bufio.NewReader(r) + hdr, err := b.Peek(3) + if err != nil { + return nil, err + } + if hdr[0] == 0x1f && hdr[1] == 0x8b && hdr[2] == 8 { + return gzip.NewReader(b) + } + return b, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/mount.go b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/mount.go new file mode 100644 index 00000000..63a699b3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/mount.go @@ -0,0 +1,88 @@ +package main + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/internal/appargs" + "github.com/urfave/cli" +) + +var mountCommand = cli.Command{ + Name: "mount", + Usage: "mounts a scratch", + ArgsUsage: "", + Flags: []cli.Flag{ + cli.StringSliceFlag{ + Name: "layer, l", + Usage: "paths to the parent layers for this layer", + }, + }, + Action: func(context *cli.Context) (err error) { + if context.NArg() != 1 { + return errors.New("invalid usage") + } + path, err := filepath.Abs(context.Args().First()) + if err != nil { + return err + } + + layers, err := normalizeLayers(context.StringSlice("layer"), true) + if err != nil { + return err + } + + err = hcsshim.ActivateLayer(driverInfo, path) + if err != nil { + return err + } + defer func() { + if err != nil { + hcsshim.DeactivateLayer(driverInfo, path) + } + }() + + err = hcsshim.PrepareLayer(driverInfo, path, layers) + if err != nil { + return err + } + defer func() { + if err != nil { + hcsshim.UnprepareLayer(driverInfo, path) + } + }() + + mountPath, err := hcsshim.GetLayerMountPath(driverInfo, path) + if err != nil { + return err + } + _, err = fmt.Println(mountPath) + return err + }, +} + +var unmountCommand = cli.Command{ + Name: "unmount", + Usage: "unmounts a scratch", + ArgsUsage: "", + Before: appargs.Validate(appargs.NonEmptyString), + Action: func(context *cli.Context) (err error) { + path, err := filepath.Abs(context.Args().First()) + if err != nil { + return err + } + + err = hcsshim.UnprepareLayer(driverInfo, path) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } + err = hcsshim.DeactivateLayer(driverInfo, path) + if err != nil { + return err + } + return nil + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/remove.go b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/remove.go new file mode 100644 index 00000000..db5f73df --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/remove.go @@ -0,0 +1,31 @@ +package main + +import ( + "path/filepath" + + winio "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/internal/appargs" + + 
"github.com/urfave/cli" +) + +var removeCommand = cli.Command{ + Name: "remove", + Usage: "permanently removes a layer directory in its entirety", + ArgsUsage: "", + Before: appargs.Validate(appargs.NonEmptyString), + Action: func(context *cli.Context) (err error) { + path, err := filepath.Abs(context.Args().First()) + if err != nil { + return err + } + + err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return err + } + + return hcsshim.DestroyLayer(driverInfo, path) + }, +} diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/resource_windows_386.syso b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/resource_windows_386.syso new file mode 100644 index 0000000000000000000000000000000000000000..b974a7ead661d38ec39e143b14892c108a77f86d GIT binary patch literal 969 zcma)5!EVz)5FNLXP?3;0^uTekdfmi^L^zexAVe!gLbReK9MQ&Jw=3DZme)y%P^EqX ze*k{~5?uKv{s8f8$C67pF!t=s&b)auUfZu6hbsBLOVoHhr(f72s-w*kAs52^3|q+# z^m%<(yH|a~acy5gFPwhJQnr$XRYp%-zwfzB>d3@OUryY&7biS$ znJtBmMQXI1xNB+M>CuDxp|DnFi*(IEpzXw67W&XeS27cpXDTX;HA%@MlN}11^;f>j zGNDx>Z8`6D0~LdWv9wX4@)BblT}P={%OV_fgBu(Ob5W{=N>%wEz3;%QDK`D;o+8gv z6@d*s8~Q3-N%)v^_D<>8TwCUQjPt(%cA4jULZwnJw4x;((Z*i4E7`l2*GY*GQcs-u z1NZ}w;L11g2Z(1okzB%ov1ey?=FOY&+D<+?RLl1sQR}%*zqDoCMB61sE`s|Rww68U zv*xaMTiK)ga2e6<1K4v|n2tCNfB291zPl4dU5mfoNjX&GKLPOtde{yPkLU;dubus& zGemSw7xWhE3wi}_MrZUA`x(|#__h3WfBkNsep}qyX_F;#acwi35LIv-@KscjB28Cd zQTT>jeU-lD5QPF&rxY8p(Vv&nWM5K3l4*Nq7?XGJ4_#ecxkJ$0kwwYT~`VJmsOs zY$bFeGNa|hTTAOrj}Pue!djUxvNZ#Nwi9nz>LVLp%UoEVtGG1Qq!o`%ek5$(Uj-h^ zg;uGw)x6scR16X&(#EAKDvWV_6K7&A%V^LIZg3zhM5PugQ`LX;z5}nP*z{|9ilWF= z3^w#^>FaPU(L>JJ8>JI-V_D!c&i@A3Wl@;2lF8Wxn + + wclayer + + + + + + + diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/wclayer.go new file mode 100644 index 00000000..cc8c0dca --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/cmd/wclayer/wclayer.go @@ -0,0 +1,60 @@ +package main + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/Microsoft/hcsshim" + "github.com/urfave/cli" +) + +// Add a manifest to get proper Windows version detection. +// +// goversioninfo can be installed with "go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo" + +//go:generate goversioninfo -platform-specific + +var usage = `Windows Container layer utility + +wclayer is a command line tool for manipulating Windows Container +storage layers. 
It can import and export layers from and to OCI format +layer tar files, create new writable layers, and mount and unmount +container images.` + +var driverInfo = hcsshim.DriverInfo{} + +func main() { + app := cli.NewApp() + app.Name = "wclayer" + app.Commands = []cli.Command{ + createCommand, + exportCommand, + importCommand, + mountCommand, + removeCommand, + unmountCommand, + } + app.Usage = usage + + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func normalizeLayers(il []string, needOne bool) ([]string, error) { + if needOne && len(il) == 0 { + return nil, errors.New("at least one read-only layer must be specified") + } + ol := make([]string, len(il)) + for i := range il { + var err error + ol[i], err = filepath.Abs(il[i]) + if err != nil { + return nil, err + } + } + return ol, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go new file mode 100644 index 00000000..e142c315 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -0,0 +1,192 @@ +package hcsshim + +import ( + "fmt" + "os" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/mergemaps" + "github.com/Microsoft/hcsshim/internal/schema1" +) + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties = schema1.ContainerProperties + +// MemoryStats holds the memory statistics for a container +type MemoryStats = schema1.MemoryStats + +// ProcessorStats holds the processor statistics for a container +type ProcessorStats = schema1.ProcessorStats + +// StorageStats holds the storage statistics for a container +type StorageStats = schema1.StorageStats + +// NetworkStats holds the network statistics for a container +type NetworkStats = schema1.NetworkStats + +// Statistics is the structure returned by a statistics call on a container +type Statistics = schema1.Statistics + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem = schema1.ProcessListItem + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController = schema1.MappedVirtualDiskController + +// Type of Request Support in ModifySystem +type RequestType = schema1.RequestType + +// Type of Resource Support in ModifySystem +type ResourceType = schema1.ResourceType + +// RequestType const +const ( + Add = schema1.Add + Remove = schema1.Remove + Network = schema1.Network +) + +// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse + +type container struct { + system *hcs.System +} + +// createComputeSystemAdditionalJSON is read from the environment at initialisation +// time. It allows an environment variable to define additional JSON which +// is merged in the CreateComputeSystem call to HCS. +var createContainerAdditionalJSON []byte + +func init() { + createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")) +} + +// CreateContainer creates a new container with the given configuration but does not start it. 
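// Editorial sketch (not part of the upstream patch): as the comment above
// notes, CreateContainer only registers the compute system; Start must be
// called separately, and Close releases the handle without stopping the
// container. A minimal caller, assuming imports "log" and
// "github.com/Microsoft/hcsshim"; the ContainerConfig fields are omitted
// because they are declared elsewhere in this package, and the Container
// methods used here mirror the concrete methods defined later in this file.
func exampleContainerLifecycle(id string, cfg *hcsshim.ContainerConfig) {
	c, err := hcsshim.CreateContainer(id, cfg) // created, but not yet running
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close() // drops the handle; does not terminate the container
	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	// Wait blocks until the container shuts down or terminates.
	if err := c.Wait(); err != nil {
		log.Fatal(err)
	}
}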
+func CreateContainer(id string, c *ContainerConfig) (Container, error) { + fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) + if err != nil { + return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err) + } + + system, err := hcs.CreateComputeSystem(id, fullConfig) + if err != nil { + return nil, err + } + return &container{system}, err +} + +// OpenContainer opens an existing container by ID. +func OpenContainer(id string) (Container, error) { + system, err := hcs.OpenComputeSystem(id) + if err != nil { + return nil, err + } + return &container{system}, err +} + +// GetContainers gets a list of the containers on the system that match the query +func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) { + return hcs.GetComputeSystems(q) +} + +// Start synchronously starts the container. +func (container *container) Start() error { + return convertSystemError(container.system.Start(), container) +} + +// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. +func (container *container) Shutdown() error { + return convertSystemError(container.system.Shutdown(), container) +} + +// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. +func (container *container) Terminate() error { + return convertSystemError(container.system.Terminate(), container) +} + +// Waits synchronously waits for the container to shutdown or terminate. +func (container *container) Wait() error { + return convertSystemError(container.system.Wait(), container) +} + +// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It +// returns false if timeout occurs. +func (container *container) WaitTimeout(t time.Duration) error { + return convertSystemError(container.system.WaitTimeout(t), container) +} + +// Pause pauses the execution of a container. +func (container *container) Pause() error { + return convertSystemError(container.system.Pause(), container) +} + +// Resume resumes the execution of a container. +func (container *container) Resume() error { + return convertSystemError(container.system.Resume(), container) +} + +// HasPendingUpdates returns true if the container has updates pending to install +func (container *container) HasPendingUpdates() (bool, error) { + return false, nil +} + +// Statistics returns statistics for the container. This is a legacy v1 call +func (container *container) Statistics() (Statistics, error) { + properties, err := container.system.Properties(schema1.PropertyTypeStatistics) + if err != nil { + return Statistics{}, convertSystemError(err, container) + } + + return properties.Statistics, nil +} + +// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call +func (container *container) ProcessList() ([]ProcessListItem, error) { + properties, err := container.system.Properties(schema1.PropertyTypeProcessList) + if err != nil { + return nil, convertSystemError(err, container) + } + + return properties.ProcessList, nil +} + +// This is a legacy v1 call +func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) { + properties, err := container.system.Properties(schema1.PropertyTypeMappedVirtualDisk) + if err != nil { + return nil, convertSystemError(err, container) + } + + return properties.MappedVirtualDiskControllers, nil +} + +// CreateProcess launches a new process within the container. 
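// Editorial sketch (not part of the upstream patch): running a command inside
// an already-started container and collecting its exit code. Assumes imports
// "log" and "github.com/Microsoft/hcsshim"; the ProcessConfig fields are left
// to the caller since they are declared elsewhere in this package, and the
// Process methods used here (Wait, ExitCode, Close) are assumed from the
// package's public Process interface rather than shown in this excerpt.
func exampleRunProcess(c hcsshim.Container, cfg *hcsshim.ProcessConfig) int {
	p, err := c.CreateProcess(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()
	if err := p.Wait(); err != nil { // block until the process exits
		log.Fatal(err)
	}
	code, err := p.ExitCode()
	if err != nil {
		log.Fatal(err)
	}
	return code
}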
+func (container *container) CreateProcess(c *ProcessConfig) (Process, error) { + p, err := container.system.CreateProcess(c) + if err != nil { + return nil, convertSystemError(err, container) + } + return &process{p}, nil +} + +// OpenProcess gets an interface to an existing process within the container. +func (container *container) OpenProcess(pid int) (Process, error) { + p, err := container.system.OpenProcess(pid) + if err != nil { + return nil, convertSystemError(err, container) + } + return &process{p}, nil +} + +// Close cleans up any state associated with the container but does not terminate or wait for it. +func (container *container) Close() error { + return convertSystemError(container.system.Close(), container) +} + +// Modify the System +func (container *container) Modify(config *ResourceModificationRequestResponse) error { + return convertSystemError(container.system.Modify(config), container) +} diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go new file mode 100644 index 00000000..63efa23c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/errors.go @@ -0,0 +1,257 @@ +package hcsshim + +import ( + "fmt" + "syscall" + + "github.com/Microsoft/hcsshim/internal/hns" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcserror" +) + +var ( + // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists = hcs.exist + ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrElementNotFound = hcs.ErrElementNotFound + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrNotSupported = hcs.ErrNotSupported + + // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported + // decimal -2147024883 / hex 0x8007000d + ErrInvalidData = hcs.ErrInvalidData + + // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed + ErrHandleClose = hcs.ErrHandleClose + + // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method + ErrAlreadyClosed = hcs.ErrAlreadyClosed + + // ErrInvalidNotificationType is an error encountered when an invalid notification type is used + ErrInvalidNotificationType = hcs.ErrInvalidNotificationType + + // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation + ErrInvalidProcessState = hcs.ErrInvalidProcessState + + // ErrTimeout is an error encountered when waiting on a notification times out + ErrTimeout = hcs.ErrTimeout + + // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for + // a different expected notification + ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit + + // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service + // is lost while waiting for a notification + ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort + + // ErrUnexpectedValue is an error encountered when hcs returns an invalid value + ErrUnexpectedValue = hcs.ErrUnexpectedValue + + // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container + ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped + + // ErrVmcomputeOperationPending is 
an error encountered when the operation is being completed asynchronously + ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending + + // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation + ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState + + // ErrProcNotFound is an error encountered when the the process cannot be found + ErrProcNotFound = hcs.ErrProcNotFound + + // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 + // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. + ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied + + // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management + ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON + + // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message + ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage + + // ErrNotSupported is an error encountered when hcs doesn't support the request + ErrPlatformNotSupported = hcs.ErrPlatformNotSupported +) + +type EndpointNotFoundError = hns.EndpointNotFoundError +type NetworkNotFoundError = hns.NetworkNotFoundError + +// ProcessError is an error encountered in HCS during an operation on a Process object +type ProcessError struct { + Process *process + Operation string + ExtraInfo string + Err error + Events []hcs.ErrorEvent +} + +// ContainerError is an error encountered in HCS during an operation on a Container object +type ContainerError struct { + Container *container + Operation string + ExtraInfo string + Err error + Events []hcs.ErrorEvent +} + +func (e *ContainerError) Error() string { + if e == nil { + return "" + } + + if e.Container == nil { + return "unexpected nil container for error: " + e.Err.Error() + } + + s := "container " + e.Container.system.ID() + + if e.Operation != "" { + s += " encountered an error during " + e.Operation + } + + switch e.Err.(type) { + case nil: + break + case syscall.Errno: + s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) + default: + s += fmt.Sprintf(": %s", e.Err.Error()) + } + + for _, ev := range e.Events { + s += "\n" + ev.String() + } + + if e.ExtraInfo != "" { + s += " extra info: " + e.ExtraInfo + } + + return s +} + +func makeContainerError(container *container, operation string, extraInfo string, err error) error { + // Don't double wrap errors + if _, ok := err.(*ContainerError); ok { + return err + } + containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err} + return containerError +} + +func (e *ProcessError) Error() string { + if e == nil { + return "" + } + + if e.Process == nil { + return "Unexpected nil process for error: " + e.Err.Error() + } + + s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID()) + if e.Operation != "" { + s += " encountered an error during " + e.Operation + } + + switch e.Err.(type) { + case nil: + break + case syscall.Errno: + s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) + default: + s += fmt.Sprintf(": %s", e.Err.Error()) + } + + for _, ev := range e.Events { + s += "\n" + ev.String() + } + + return s +} + +func makeProcessError(process 
*process, operation string, extraInfo string, err error) error { + // Don't double wrap errors + if _, ok := err.(*ProcessError); ok { + return err + } + processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err} + return processError +} + +// IsNotExist checks if an error is caused by the Container or Process not existing. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +func IsNotExist(err error) bool { + if _, ok := err.(EndpointNotFoundError); ok { + return true + } + if _, ok := err.(NetworkNotFoundError); ok { + return true + } + return hcs.IsNotExist(getInnerError(err)) +} + +// IsAlreadyClosed checks if an error is caused by the Container or Process having been +// already closed by a call to the Close() method. +func IsAlreadyClosed(err error) bool { + return hcs.IsAlreadyClosed(getInnerError(err)) +} + +// IsPending returns a boolean indicating whether the error is that +// the requested operation is being completed in the background. +func IsPending(err error) bool { + return hcs.IsPending(getInnerError(err)) +} + +// IsTimeout returns a boolean indicating whether the error is caused by +// a timeout waiting for the operation to complete. +func IsTimeout(err error) bool { + return hcs.IsTimeout(getInnerError(err)) +} + +// IsAlreadyStopped returns a boolean indicating whether the error is caused by +// a Container or Process being already stopped. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. 
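// Editorial sketch (not part of the upstream patch): these classification
// helpers are typically used to drive a graceful-then-forced stop, much like
// the shim's shutdown path earlier in this patch. Assumes imports "time" and
// "github.com/Microsoft/hcsshim"; the Container methods used here mirror the
// concrete methods defined in container.go, while the interface itself is
// declared elsewhere in the package.
func exampleGracefulStop(c hcsshim.Container, timeout time.Duration) error {
	err := c.Shutdown()
	if hcsshim.IsPending(err) {
		// Shutdown was accepted and is completing asynchronously.
		err = c.WaitTimeout(timeout)
	}
	if err == nil || hcsshim.IsAlreadyStopped(err) {
		return nil // clean shutdown, or the container was already gone
	}
	// Graceful shutdown failed or timed out; fall back to a hard stop.
	return c.Terminate()
}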
+func IsAlreadyStopped(err error) bool { + return hcs.IsAlreadyStopped(getInnerError(err)) +} + +// IsNotSupported returns a boolean indicating whether the error is caused by +// unsupported platform requests +// Note: Currently Unsupported platform requests can be mean either +// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage +// is thrown from the Platform +func IsNotSupported(err error) bool { + return hcs.IsNotSupported(getInnerError(err)) +} + +func getInnerError(err error) error { + switch pe := err.(type) { + case nil: + return nil + case *ContainerError: + err = pe.Err + case *ProcessError: + err = pe.Err + } + return err +} + +func convertSystemError(err error, c *container) error { + if serr, ok := err.(*hcs.SystemError); ok { + return &ContainerError{Container: c, Operation: serr.Op, ExtraInfo: serr.Extra, Err: serr.Err, Events: serr.Events} + } + return err +} + +func convertProcessError(err error, p *process) error { + if perr, ok := err.(*hcs.ProcessError); ok { + return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events} + } + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go b/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go new file mode 100644 index 00000000..f2274fd4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact.go @@ -0,0 +1,1263 @@ +package compactext4 + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "path" + "sort" + "strings" + "time" + + "github.com/Microsoft/hcsshim/ext4/internal/format" +) + +// Writer writes a compact ext4 file system. +type Writer struct { + f io.ReadWriteSeeker + bw *bufio.Writer + inodes []*inode + curName string + curInode *inode + pos int64 + dataWritten, dataMax int64 + err error + initialized bool + supportInlineData bool + maxDiskSize int64 + gdBlocks uint32 +} + +// Mode flags for Linux files. +const ( + S_IXOTH = format.S_IXOTH + S_IWOTH = format.S_IWOTH + S_IROTH = format.S_IROTH + S_IXGRP = format.S_IXGRP + S_IWGRP = format.S_IWGRP + S_IRGRP = format.S_IRGRP + S_IXUSR = format.S_IXUSR + S_IWUSR = format.S_IWUSR + S_IRUSR = format.S_IRUSR + S_ISVTX = format.S_ISVTX + S_ISGID = format.S_ISGID + S_ISUID = format.S_ISUID + S_IFIFO = format.S_IFIFO + S_IFCHR = format.S_IFCHR + S_IFDIR = format.S_IFDIR + S_IFBLK = format.S_IFBLK + S_IFREG = format.S_IFREG + S_IFLNK = format.S_IFLNK + S_IFSOCK = format.S_IFSOCK + + TypeMask = format.TypeMask +) + +type inode struct { + Size int64 + Atime, Ctime, Mtime, Crtime uint64 + Number format.InodeNumber + Mode uint16 + Uid, Gid uint32 + LinkCount uint32 + XattrBlock uint32 + BlockCount uint32 + Devmajor, Devminor uint32 + Flags format.InodeFlag + Data []byte + XattrInline []byte + Children directory +} + +func (node *inode) FileType() uint16 { + return node.Mode & format.TypeMask +} + +func (node *inode) IsDir() bool { + return node.FileType() == S_IFDIR +} + +// A File represents a file to be added to an ext4 file system. 
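// Editorial sketch (not part of the upstream patch): describing a small
// regular file for this package. The field and mode names come from the
// declarations nearby; the Create and Write calls are assumptions about the
// Writer API, which is defined later in this file rather than in the excerpt
// shown here. The "time" package is already imported by compact.go.
func exampleDescribeFile(w *Writer, contents []byte) error {
	f := &File{
		Mode:  S_IFREG | 0644, // regular file, rw-r--r--
		Size:  int64(len(contents)),
		Mtime: time.Now(),
		Xattrs: map[string][]byte{
			"user.example": []byte("illustrative xattr"),
		},
	}
	// Assumed API: register the file under a path, then stream its data.
	if err := w.Create("etc/example", f); err != nil {
		return err
	}
	_, err := w.Write(contents)
	return err
}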
+type File struct { + Linkname string + Size int64 + Mode uint16 + Uid, Gid uint32 + Atime, Ctime, Mtime, Crtime time.Time + Devmajor, Devminor uint32 + Xattrs map[string][]byte +} + +const ( + inodeFirst = 11 + inodeLostAndFound = inodeFirst + + blockSize = 4096 + blocksPerGroup = blockSize * 8 + inodeSize = 256 + maxInodesPerGroup = blockSize * 8 // Limited by the inode bitmap + inodesPerGroupIncrement = blockSize / inodeSize + + defaultMaxDiskSize = 16 * 1024 * 1024 * 1024 // 16GB + maxMaxDiskSize = 16 * 1024 * 1024 * 1024 * 1024 // 16TB + + groupDescriptorSize = 32 // Use the small group descriptor + groupsPerDescriptorBlock = blockSize / groupDescriptorSize + + maxFileSize = 128 * 1024 * 1024 * 1024 // 128GB file size maximum for now + smallSymlinkSize = 59 // max symlink size that goes directly in the inode + maxBlocksPerExtent = 0x8000 // maximum number of blocks in an extent + inodeDataSize = 60 + inodeUsedSize = 152 // fields through CrtimeExtra + inodeExtraSize = inodeSize - inodeUsedSize + xattrInodeOverhead = 4 + 4 // magic number + empty next entry value + xattrBlockOverhead = 32 + 4 // header + empty next entry value + inlineDataXattrOverhead = xattrInodeOverhead + 16 + 4 // entry + "data" + inlineDataSize = inodeDataSize + inodeExtraSize - inlineDataXattrOverhead +) + +type exceededMaxSizeError struct { + Size int64 +} + +func (err exceededMaxSizeError) Error() string { + return fmt.Sprintf("disk exceeded maximum size of %d bytes", err.Size) +} + +var directoryEntrySize = binary.Size(format.DirectoryEntry{}) +var extraIsize = uint16(inodeUsedSize - 128) + +type directory map[string]*inode + +func splitFirst(p string) (string, string) { + n := strings.IndexByte(p, '/') + if n >= 0 { + return p[:n], p[n+1:] + } + return p, "" +} + +func (w *Writer) findPath(root *inode, p string) *inode { + inode := root + for inode != nil && len(p) != 0 { + name, rest := splitFirst(p) + p = rest + inode = inode.Children[name] + } + return inode +} + +func timeToFsTime(t time.Time) uint64 { + if t.IsZero() { + return 0 + } + s := t.Unix() + if s < -0x80000000 { + return 0x80000000 + } + if s > 0x37fffffff { + return 0x37fffffff + } + return uint64(s) | uint64(t.Nanosecond())<<34 +} + +func fsTimeToTime(t uint64) time.Time { + if t == 0 { + return time.Time{} + } + s := int64(t & 0x3ffffffff) + if s > 0x7fffffff && s < 0x100000000 { + s = int64(int32(uint32(s))) + } + return time.Unix(s, int64(t>>34)) +} + +func (w *Writer) getInode(i format.InodeNumber) *inode { + if i == 0 || int(i) > len(w.inodes) { + return nil + } + return w.inodes[i-1] +} + +var xattrPrefixes = []struct { + Index uint8 + Prefix string +}{ + {2, "system.posix_acl_access"}, + {3, "system.posix_acl_default"}, + {8, "system.richacl"}, + {7, "system."}, + {1, "user."}, + {4, "trusted."}, + {6, "security."}, +} + +func compressXattrName(name string) (uint8, string) { + for _, p := range xattrPrefixes { + if strings.HasPrefix(name, p.Prefix) { + return p.Index, name[len(p.Prefix):] + } + } + return 0, name +} + +func decompressXattrName(index uint8, name string) string { + for _, p := range xattrPrefixes { + if index == p.Index { + return p.Prefix + name + } + } + return name +} + +func hashXattrEntry(name string, value []byte) uint32 { + var hash uint32 + for i := 0; i < len(name); i++ { + hash = (hash << 5) ^ (hash >> 27) ^ uint32(name[i]) + } + + for i := 0; i+3 < len(value); i += 4 { + hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(value[i:i+4]) + } + + if len(value)%4 != 0 { + var last [4]byte + 
copy(last[:], value[len(value)&^3:]) + hash = (hash << 16) ^ (hash >> 16) ^ binary.LittleEndian.Uint32(last[:]) + } + return hash +} + +type xattr struct { + Name string + Index uint8 + Value []byte +} + +func (x *xattr) EntryLen() int { + return (len(x.Name)+3)&^3 + 16 +} + +func (x *xattr) ValueLen() int { + return (len(x.Value) + 3) &^ 3 +} + +type xattrState struct { + inode, block []xattr + inodeLeft, blockLeft int +} + +func (s *xattrState) init() { + s.inodeLeft = inodeExtraSize - xattrInodeOverhead + s.blockLeft = blockSize - xattrBlockOverhead +} + +func (s *xattrState) addXattr(name string, value []byte) bool { + index, name := compressXattrName(name) + x := xattr{ + Index: index, + Name: name, + Value: value, + } + length := x.EntryLen() + x.ValueLen() + if s.inodeLeft >= length { + s.inode = append(s.inode, x) + s.inodeLeft -= length + } else if s.blockLeft >= length { + s.block = append(s.block, x) + s.blockLeft -= length + } else { + return false + } + return true +} + +func putXattrs(xattrs []xattr, b []byte, offsetDelta uint16) { + offset := uint16(len(b)) + offsetDelta + eb := b + db := b + for _, xattr := range xattrs { + vl := xattr.ValueLen() + offset -= uint16(vl) + eb[0] = uint8(len(xattr.Name)) + eb[1] = xattr.Index + binary.LittleEndian.PutUint16(eb[2:], offset) + binary.LittleEndian.PutUint32(eb[8:], uint32(len(xattr.Value))) + binary.LittleEndian.PutUint32(eb[12:], hashXattrEntry(xattr.Name, xattr.Value)) + copy(eb[16:], xattr.Name) + eb = eb[xattr.EntryLen():] + copy(db[len(db)-vl:], xattr.Value) + db = db[:len(db)-vl] + } +} + +func getXattrs(b []byte, xattrs map[string][]byte, offsetDelta uint16) { + eb := b + for len(eb) != 0 { + nameLen := eb[0] + if nameLen == 0 { + break + } + index := eb[1] + offset := binary.LittleEndian.Uint16(eb[2:]) - offsetDelta + valueLen := binary.LittleEndian.Uint32(eb[8:]) + attr := xattr{ + Index: index, + Name: string(eb[16 : 16+nameLen]), + Value: b[offset : uint32(offset)+valueLen], + } + xattrs[decompressXattrName(index, attr.Name)] = attr.Value + eb = eb[attr.EntryLen():] + } +} + +func (w *Writer) writeXattrs(inode *inode, state *xattrState) error { + // Write the inline attributes. + if len(state.inode) != 0 { + inode.XattrInline = make([]byte, inodeExtraSize) + binary.LittleEndian.PutUint32(inode.XattrInline[0:], format.XAttrHeaderMagic) // Magic + putXattrs(state.inode, inode.XattrInline[4:], 0) + } + + // Write the block attributes. If there was previously an xattr block, then + // rewrite it even if it is now empty. + if len(state.block) != 0 || inode.XattrBlock != 0 { + sort.Slice(state.block, func(i, j int) bool { + return state.block[i].Index < state.block[j].Index || + len(state.block[i].Name) < len(state.block[j].Name) || + state.block[i].Name < state.block[j].Name + }) + + var b [blockSize]byte + binary.LittleEndian.PutUint32(b[0:], format.XAttrHeaderMagic) // Magic + binary.LittleEndian.PutUint32(b[4:], 1) // ReferenceCount + binary.LittleEndian.PutUint32(b[8:], 1) // Blocks + putXattrs(state.block, b[32:], 32) + + orig := w.block() + if inode.XattrBlock == 0 { + inode.XattrBlock = orig + inode.BlockCount++ + } else { + // Reuse the original block. 
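+ // Seek back to the previously allocated block, and restore the current
+ // write position once the rewrite is done.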
+ w.seekBlock(inode.XattrBlock) + defer w.seekBlock(orig) + } + + if _, err := w.write(b[:]); err != nil { + return err + } + } + + return nil +} + +func (w *Writer) write(b []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + if w.pos+int64(len(b)) > w.maxDiskSize { + w.err = exceededMaxSizeError{w.maxDiskSize} + return 0, w.err + } + n, err := w.bw.Write(b) + w.pos += int64(n) + w.err = err + return n, err +} + +func (w *Writer) zero(n int64) (int64, error) { + if w.err != nil { + return 0, w.err + } + if w.pos+int64(n) > w.maxDiskSize { + w.err = exceededMaxSizeError{w.maxDiskSize} + return 0, w.err + } + n, err := io.CopyN(w.bw, zero, n) + w.pos += n + w.err = err + return n, err +} + +func (w *Writer) makeInode(f *File, node *inode) (*inode, error) { + mode := f.Mode + if mode&format.TypeMask == 0 { + mode |= format.S_IFREG + } + typ := mode & format.TypeMask + ino := format.InodeNumber(len(w.inodes) + 1) + if node == nil { + node = &inode{ + Number: ino, + } + if typ == S_IFDIR { + node.Children = make(directory) + node.LinkCount = 1 // A directory is linked to itself. + } + } else if node.Flags&format.InodeFlagExtents != 0 { + // Since we cannot deallocate or reuse blocks, don't allow updates that + // would invalidate data that has already been written. + return nil, errors.New("cannot overwrite file with non-inline data") + } + node.Mode = mode + node.Uid = f.Uid + node.Gid = f.Gid + node.Flags = format.InodeFlagHugeFile + node.Atime = timeToFsTime(f.Atime) + node.Ctime = timeToFsTime(f.Ctime) + node.Mtime = timeToFsTime(f.Mtime) + node.Crtime = timeToFsTime(f.Crtime) + node.Devmajor = f.Devmajor + node.Devminor = f.Devminor + node.Data = nil + node.XattrInline = nil + + var xstate xattrState + xstate.init() + + var size int64 + switch typ { + case format.S_IFREG: + size = f.Size + if f.Size > maxFileSize { + return nil, fmt.Errorf("file too big: %d > %d", f.Size, int64(maxFileSize)) + } + if f.Size <= inlineDataSize && w.supportInlineData { + node.Data = make([]byte, f.Size) + extra := 0 + if f.Size > inodeDataSize { + extra = int(f.Size - inodeDataSize) + } + // Add a dummy entry for now. + if !xstate.addXattr("system.data", node.Data[:extra]) { + panic("not enough room for inline data") + } + node.Flags |= format.InodeFlagInlineData + } + case format.S_IFLNK: + node.Mode |= 0777 // Symlinks should appear as ugw rwx + size = int64(len(f.Linkname)) + if size <= smallSymlinkSize { + // Special case: small symlinks go directly in Block without setting + // an inline data flag. + node.Data = make([]byte, len(f.Linkname)) + copy(node.Data, f.Linkname) + } + case format.S_IFDIR, format.S_IFIFO, format.S_IFSOCK, format.S_IFCHR, format.S_IFBLK: + default: + return nil, fmt.Errorf("invalid mode %o", mode) + } + + // Accumulate the extended attributes. + if len(f.Xattrs) != 0 { + // Sort the xattrs to avoid non-determinism in map iteration. + var xattrs []string + for name := range f.Xattrs { + xattrs = append(xattrs, name) + } + sort.Strings(xattrs) + for _, name := range xattrs { + if !xstate.addXattr(name, f.Xattrs[name]) { + return nil, fmt.Errorf("could not fit xattr %s", name) + } + } + } + + if err := w.writeXattrs(node, &xstate); err != nil { + return nil, err + } + + node.Size = size + if typ == format.S_IFLNK && size > smallSymlinkSize { + // Write the link name as data. 
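+ // Targets longer than smallSymlinkSize do not fit in the inode itself,
+ // so they are written out like regular file contents.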
+ w.startInode("", node, size) + if _, err := w.Write([]byte(f.Linkname)); err != nil { + return nil, err + } + if err := w.finishInode(); err != nil { + return nil, err + } + } + + if int(node.Number-1) >= len(w.inodes) { + w.inodes = append(w.inodes, node) + } + return node, nil +} + +func (w *Writer) root() *inode { + return w.getInode(format.InodeRoot) +} + +func (w *Writer) lookup(name string, mustExist bool) (*inode, *inode, string, error) { + root := w.root() + cleanname := path.Clean("/" + name)[1:] + if len(cleanname) == 0 { + return root, root, "", nil + } + dirname, childname := path.Split(cleanname) + if len(childname) == 0 || len(childname) > 0xff { + return nil, nil, "", fmt.Errorf("%s: invalid name", name) + } + dir := w.findPath(root, dirname) + if dir == nil || !dir.IsDir() { + return nil, nil, "", fmt.Errorf("%s: path not found", name) + } + child := dir.Children[childname] + if child == nil && mustExist { + return nil, nil, "", fmt.Errorf("%s: file not found", name) + } + return dir, child, childname, nil +} + +// Create adds a file to the file system. +func (w *Writer) Create(name string, f *File) error { + if err := w.finishInode(); err != nil { + return err + } + dir, existing, childname, err := w.lookup(name, false) + if err != nil { + return err + } + var reuse *inode + if existing != nil { + if existing.IsDir() { + if f.Mode&TypeMask != S_IFDIR { + return fmt.Errorf("%s: cannot replace a directory with a file", name) + } + reuse = existing + } else if f.Mode&TypeMask == S_IFDIR { + return fmt.Errorf("%s: cannot replace a file with a directory", name) + } else if existing.LinkCount < 2 { + reuse = existing + } + } else { + if f.Mode&TypeMask == S_IFDIR && dir.LinkCount >= format.MaxLinks { + return fmt.Errorf("%s: exceeded parent directory maximum link count", name) + } + } + child, err := w.makeInode(f, reuse) + if err != nil { + return fmt.Errorf("%s: %s", name, err) + } + if existing != child { + if existing != nil { + existing.LinkCount-- + } + dir.Children[childname] = child + child.LinkCount++ + if child.IsDir() { + dir.LinkCount++ + } + } + if child.Mode&format.TypeMask == format.S_IFREG { + w.startInode(name, child, f.Size) + } + return nil +} + +// Link adds a hard link to the file system. +func (w *Writer) Link(oldname, newname string) error { + if err := w.finishInode(); err != nil { + return err + } + newdir, existing, newchildname, err := w.lookup(newname, false) + if err != nil { + return err + } + if existing != nil && (existing.IsDir() || existing.LinkCount < 2) { + return fmt.Errorf("%s: cannot orphan existing file or directory", newname) + } + + _, oldfile, _, err := w.lookup(oldname, true) + if err != nil { + return err + } + switch oldfile.Mode & format.TypeMask { + case format.S_IFDIR, format.S_IFLNK: + return fmt.Errorf("%s: link target cannot be a directory or symlink: %s", newname, oldname) + } + + if existing != oldfile && oldfile.LinkCount >= format.MaxLinks { + return fmt.Errorf("%s: link target would exceed maximum link count: %s", newname, oldname) + } + + if existing != nil { + existing.LinkCount-- + } + oldfile.LinkCount++ + newdir.Children[newchildname] = oldfile + return nil +} + +// Stat returns information about a file that has been written. 
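+// Any file still being written is finished first so that the returned
+// metadata is complete.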
+func (w *Writer) Stat(name string) (*File, error) { + if err := w.finishInode(); err != nil { + return nil, err + } + _, node, _, err := w.lookup(name, true) + if err != nil { + return nil, err + } + f := &File{ + Size: node.Size, + Mode: node.Mode, + Uid: node.Uid, + Gid: node.Gid, + Atime: fsTimeToTime(node.Atime), + Ctime: fsTimeToTime(node.Ctime), + Mtime: fsTimeToTime(node.Mtime), + Crtime: fsTimeToTime(node.Crtime), + Devmajor: node.Devmajor, + Devminor: node.Devminor, + } + f.Xattrs = make(map[string][]byte) + if node.XattrBlock != 0 || len(node.XattrInline) != 0 { + if node.XattrBlock != 0 { + orig := w.block() + w.seekBlock(node.XattrBlock) + if w.err != nil { + return nil, w.err + } + var b [blockSize]byte + _, err := w.f.Read(b[:]) + w.seekBlock(orig) + if err != nil { + return nil, err + } + getXattrs(b[32:], f.Xattrs, 32) + } + if len(node.XattrInline) != 0 { + getXattrs(node.XattrInline[4:], f.Xattrs, 0) + delete(f.Xattrs, "system.data") + } + } + if node.FileType() == S_IFLNK { + if node.Size > smallSymlinkSize { + return nil, fmt.Errorf("%s: cannot retrieve link information", name) + } + f.Linkname = string(node.Data) + } + return f, nil +} + +func (w *Writer) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + if w.dataWritten+int64(len(b)) > w.dataMax { + return 0, fmt.Errorf("%s: wrote too much: %d > %d", w.curName, w.dataWritten+int64(len(b)), w.dataMax) + } + + if w.curInode.Flags&format.InodeFlagInlineData != 0 { + copy(w.curInode.Data[w.dataWritten:], b) + w.dataWritten += int64(len(b)) + return len(b), nil + } + + n, err := w.write(b) + w.dataWritten += int64(n) + return n, err +} + +func (w *Writer) startInode(name string, inode *inode, size int64) { + if w.curInode != nil { + panic("inode already in progress") + } + w.curName = name + w.curInode = inode + w.dataWritten = 0 + w.dataMax = size +} + +func (w *Writer) block() uint32 { + return uint32(w.pos / blockSize) +} + +func (w *Writer) seekBlock(block uint32) { + w.pos = int64(block) * blockSize + if w.err != nil { + return + } + w.err = w.bw.Flush() + if w.err != nil { + return + } + _, w.err = w.f.Seek(w.pos, io.SeekStart) +} + +func (w *Writer) nextBlock() { + if w.pos%blockSize != 0 { + // Simplify callers; w.err is updated on failure. + w.zero(blockSize - w.pos%blockSize) + } +} + +func fillExtents(hdr *format.ExtentHeader, extents []format.ExtentLeafNode, startBlock, offset, inodeSize uint32) { + *hdr = format.ExtentHeader{ + Magic: format.ExtentHeaderMagic, + Entries: uint16(len(extents)), + Max: uint16(cap(extents)), + Depth: 0, + } + for i := range extents { + block := offset + uint32(i)*maxBlocksPerExtent + length := inodeSize - block + if length > maxBlocksPerExtent { + length = maxBlocksPerExtent + } + start := startBlock + block + extents[i] = format.ExtentLeafNode{ + Block: block, + Length: uint16(length), + StartLow: start, + } + } +} + +func (w *Writer) writeExtents(inode *inode) error { + start := w.pos - w.dataWritten + if start%blockSize != 0 { + panic("unaligned") + } + w.nextBlock() + + startBlock := uint32(start / blockSize) + blocks := w.block() - startBlock + usedBlocks := blocks + + const extentNodeSize = 12 + const extentsPerBlock = blockSize/extentNodeSize - 1 + + extents := (blocks + maxBlocksPerExtent - 1) / maxBlocksPerExtent + var b bytes.Buffer + if extents == 0 { + // Nothing to do. 
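+ // No data blocks were written for this inode, so no extent tree is needed.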
+ } else if extents <= 4 { + var root struct { + hdr format.ExtentHeader + extents [4]format.ExtentLeafNode + } + fillExtents(&root.hdr, root.extents[:extents], startBlock, 0, blocks) + binary.Write(&b, binary.LittleEndian, root) + } else if extents <= 4*extentsPerBlock { + const extentsPerBlock = blockSize/extentNodeSize - 1 + extentBlocks := extents/extentsPerBlock + 1 + usedBlocks += extentBlocks + var b2 bytes.Buffer + + var root struct { + hdr format.ExtentHeader + nodes [4]format.ExtentIndexNode + } + root.hdr = format.ExtentHeader{ + Magic: format.ExtentHeaderMagic, + Entries: uint16(extentBlocks), + Max: 4, + Depth: 1, + } + for i := uint32(0); i < extentBlocks; i++ { + root.nodes[i] = format.ExtentIndexNode{ + Block: i * extentsPerBlock * maxBlocksPerExtent, + LeafLow: w.block(), + } + extentsInBlock := extents - i*extentBlocks + if extentsInBlock > extentsPerBlock { + extentsInBlock = extentsPerBlock + } + + var node struct { + hdr format.ExtentHeader + extents [extentsPerBlock]format.ExtentLeafNode + _ [blockSize - (extentsPerBlock+1)*extentNodeSize]byte + } + + offset := i * extentsPerBlock * maxBlocksPerExtent + fillExtents(&node.hdr, node.extents[:extentsInBlock], startBlock+offset, offset, blocks) + binary.Write(&b2, binary.LittleEndian, node) + if _, err := w.write(b2.Next(blockSize)); err != nil { + return err + } + } + binary.Write(&b, binary.LittleEndian, root) + } else { + panic("file too big") + } + + inode.Data = b.Bytes() + inode.Flags |= format.InodeFlagExtents + inode.BlockCount += usedBlocks + return w.err +} + +func (w *Writer) finishInode() error { + if !w.initialized { + if err := w.init(); err != nil { + return err + } + } + if w.curInode == nil { + return nil + } + if w.dataWritten != w.dataMax { + return fmt.Errorf("did not write the right amount: %d != %d", w.dataWritten, w.dataMax) + } + + if w.dataMax != 0 && w.curInode.Flags&format.InodeFlagInlineData == 0 { + if err := w.writeExtents(w.curInode); err != nil { + return err + } + } + + w.dataWritten = 0 + w.dataMax = 0 + w.curInode = nil + return w.err +} + +func modeToFileType(mode uint16) format.FileType { + switch mode & format.TypeMask { + default: + return format.FileTypeUnknown + case format.S_IFREG: + return format.FileTypeRegular + case format.S_IFDIR: + return format.FileTypeDirectory + case format.S_IFCHR: + return format.FileTypeCharacter + case format.S_IFBLK: + return format.FileTypeBlock + case format.S_IFIFO: + return format.FileTypeFIFO + case format.S_IFSOCK: + return format.FileTypeSocket + case format.S_IFLNK: + return format.FileTypeSymbolicLink + } +} + +type constReader byte + +var zero = constReader(0) + +func (r constReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = byte(r) + } + return len(b), nil +} + +func (w *Writer) writeDirectory(dir, parent *inode) error { + if err := w.finishInode(); err != nil { + return err + } + + // The size of the directory is not known yet. 
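+ // Pass a sentinel maximum size; the real size is recorded at the end of
+ // this function once every entry has been written.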
+ w.startInode("", dir, 0x7fffffffffffffff) + left := blockSize + finishBlock := func() error { + if left > 0 { + e := format.DirectoryEntry{ + RecordLength: uint16(left), + } + err := binary.Write(w, binary.LittleEndian, e) + if err != nil { + return err + } + left -= directoryEntrySize + if left < 4 { + panic("not enough space for trailing entry") + } + _, err = io.CopyN(w, zero, int64(left)) + if err != nil { + return err + } + } + left = blockSize + return nil + } + + writeEntry := func(ino format.InodeNumber, name string) error { + rlb := directoryEntrySize + len(name) + rl := (rlb + 3) & ^3 + if left < rl+12 { + if err := finishBlock(); err != nil { + return err + } + } + e := format.DirectoryEntry{ + Inode: ino, + RecordLength: uint16(rl), + NameLength: uint8(len(name)), + FileType: modeToFileType(w.getInode(ino).Mode), + } + err := binary.Write(w, binary.LittleEndian, e) + if err != nil { + return err + } + _, err = w.Write([]byte(name)) + if err != nil { + return err + } + var zero [4]byte + _, err = w.Write(zero[:rl-rlb]) + if err != nil { + return err + } + left -= rl + return nil + } + if err := writeEntry(dir.Number, "."); err != nil { + return err + } + if err := writeEntry(parent.Number, ".."); err != nil { + return err + } + + // Follow e2fsck's convention and sort the children by inode number. + var children []string + for name := range dir.Children { + children = append(children, name) + } + sort.Slice(children, func(i, j int) bool { + return dir.Children[children[i]].Number < dir.Children[children[j]].Number + }) + + for _, name := range children { + child := dir.Children[name] + if err := writeEntry(child.Number, name); err != nil { + return err + } + } + if err := finishBlock(); err != nil { + return err + } + w.curInode.Size = w.dataWritten + w.dataMax = w.dataWritten + return nil +} + +func (w *Writer) writeDirectoryRecursive(dir, parent *inode) error { + if err := w.writeDirectory(dir, parent); err != nil { + return err + } + for _, child := range dir.Children { + if child.IsDir() { + if err := w.writeDirectoryRecursive(child, dir); err != nil { + return err + } + } + } + return nil +} + +func (w *Writer) writeInodeTable(tableSize uint32) error { + var b bytes.Buffer + for _, inode := range w.inodes { + if inode != nil { + binode := format.Inode{ + Mode: inode.Mode, + Uid: uint16(inode.Uid & 0xffff), + Gid: uint16(inode.Gid & 0xffff), + SizeLow: uint32(inode.Size & 0xffffffff), + SizeHigh: uint32(inode.Size >> 32), + LinksCount: uint16(inode.LinkCount), + BlocksLow: inode.BlockCount, + Flags: inode.Flags, + XattrBlockLow: inode.XattrBlock, + UidHigh: uint16(inode.Uid >> 16), + GidHigh: uint16(inode.Gid >> 16), + ExtraIsize: uint16(inodeUsedSize - 128), + Atime: uint32(inode.Atime), + AtimeExtra: uint32(inode.Atime >> 32), + Ctime: uint32(inode.Ctime), + CtimeExtra: uint32(inode.Ctime >> 32), + Mtime: uint32(inode.Mtime), + MtimeExtra: uint32(inode.Mtime >> 32), + Crtime: uint32(inode.Crtime), + CrtimeExtra: uint32(inode.Crtime >> 32), + } + switch inode.Mode & format.TypeMask { + case format.S_IFDIR, format.S_IFREG, format.S_IFLNK: + n := copy(binode.Block[:], inode.Data) + if n < len(inode.Data) { + // Rewrite the first xattr with the data. + xattr := [1]xattr{{ + Name: "data", + Index: 7, // "system." 
+ Value: inode.Data[n:], + }} + putXattrs(xattr[:], inode.XattrInline[4:], 0) + } + case format.S_IFBLK, format.S_IFCHR: + dev := inode.Devminor&0xff | inode.Devmajor<<8 | (inode.Devminor&0xffffff00)<<12 + binary.LittleEndian.PutUint32(binode.Block[4:], dev) + } + + binary.Write(&b, binary.LittleEndian, binode) + b.Truncate(inodeUsedSize) + n, _ := b.Write(inode.XattrInline) + io.CopyN(&b, zero, int64(inodeExtraSize-n)) + } else { + io.CopyN(&b, zero, inodeSize) + } + if _, err := w.write(b.Next(inodeSize)); err != nil { + return err + } + } + rest := tableSize - uint32(len(w.inodes)*inodeSize) + if _, err := w.zero(int64(rest)); err != nil { + return err + } + return nil +} + +// NewWriter returns a Writer that writes an ext4 file system to the provided +// WriteSeeker. +func NewWriter(f io.ReadWriteSeeker, opts ...Option) *Writer { + w := &Writer{ + f: f, + bw: bufio.NewWriterSize(f, 65536*8), + maxDiskSize: defaultMaxDiskSize, + } + for _, opt := range opts { + opt(w) + } + return w +} + +// An Option provides extra options to NewWriter. +type Option func(*Writer) + +// InlineData instructs the Writer to write small files into the inode +// structures directly. This creates smaller images but currently is not +// compatible with DAX. +func InlineData(w *Writer) { + w.supportInlineData = true +} + +// MaximumDiskSize instructs the writer to reserve enough metadata space for the +// specified disk size. If not provided, then 16GB is the default. +func MaximumDiskSize(size int64) Option { + return func(w *Writer) { + if size < 0 || size > maxMaxDiskSize { + w.maxDiskSize = maxMaxDiskSize + } else if size == 0 { + w.maxDiskSize = defaultMaxDiskSize + } else { + w.maxDiskSize = (size + blockSize - 1) &^ (blockSize - 1) + } + } +} + +func (w *Writer) init() error { + // Skip the defective block inode. + w.inodes = make([]*inode, 1, 32) + // Create the root directory. + root, _ := w.makeInode(&File{ + Mode: format.S_IFDIR | 0755, + }, nil) + root.LinkCount++ // The root is linked to itself. + // Skip until the first non-reserved inode. + w.inodes = append(w.inodes, make([]*inode, inodeFirst-len(w.inodes)-1)...) + maxBlocks := (w.maxDiskSize-1)/blockSize + 1 + maxGroups := (maxBlocks-1)/blocksPerGroup + 1 + w.gdBlocks = uint32((maxGroups-1)/groupsPerDescriptorBlock + 1) + + // Skip past the superblock and block descriptor table. + w.seekBlock(1 + w.gdBlocks) + w.initialized = true + + // The lost+found directory is required to exist for e2fsck to pass. + if err := w.Create("lost+found", &File{Mode: format.S_IFDIR | 0700}); err != nil { + return err + } + return w.err +} + +func groupCount(blocks uint32, inodes uint32, inodesPerGroup uint32) uint32 { + inodeBlocksPerGroup := inodesPerGroup * inodeSize / blockSize + dataBlocksPerGroup := blocksPerGroup - inodeBlocksPerGroup - 2 // save room for the bitmaps + + // Increase the block count to ensure there are enough groups for all the + // inodes. 
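+ // At least ceil(inodes/inodesPerGroup) groups are required, so raise the
+ // block count to the smallest value that yields that many groups.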
+ minBlocks := (inodes-1)/inodesPerGroup*dataBlocksPerGroup + 1 + if blocks < minBlocks { + blocks = minBlocks + } + + return (blocks + dataBlocksPerGroup - 1) / dataBlocksPerGroup +} + +func bestGroupCount(blocks uint32, inodes uint32) (groups uint32, inodesPerGroup uint32) { + groups = 0xffffffff + for ipg := uint32(inodesPerGroupIncrement); ipg <= maxInodesPerGroup; ipg += inodesPerGroupIncrement { + g := groupCount(blocks, inodes, ipg) + if g < groups { + groups = g + inodesPerGroup = ipg + } + } + return +} + +func (w *Writer) Close() error { + if err := w.finishInode(); err != nil { + return err + } + root := w.root() + if err := w.writeDirectoryRecursive(root, root); err != nil { + return err + } + // Finish the last inode (probably a directory). + if err := w.finishInode(); err != nil { + return err + } + + // Write the inode table + inodeTableOffset := w.block() + groups, inodesPerGroup := bestGroupCount(inodeTableOffset, uint32(len(w.inodes))) + err := w.writeInodeTable(groups * inodesPerGroup * inodeSize) + if err != nil { + return err + } + + // Write the bitmaps. + bitmapOffset := w.block() + bitmapSize := groups * 2 + validDataSize := bitmapOffset + bitmapSize + diskSize := validDataSize + minSize := (groups-1)*blocksPerGroup + 1 + if diskSize < minSize { + diskSize = minSize + } + + usedGdBlocks := (groups-1)/groupDescriptorSize + 1 + if usedGdBlocks > w.gdBlocks { + return exceededMaxSizeError{w.maxDiskSize} + } + + gds := make([]format.GroupDescriptor, w.gdBlocks*groupsPerDescriptorBlock) + inodeTableSizePerGroup := inodesPerGroup * inodeSize / blockSize + var totalUsedBlocks, totalUsedInodes uint32 + for g := uint32(0); g < groups; g++ { + var b [blockSize * 2]byte + var dirCount, usedInodeCount, usedBlockCount uint16 + + // Block bitmap + if (g+1)*blocksPerGroup <= validDataSize { + // This group is fully allocated. + for j := range b[:blockSize] { + b[j] = 0xff + } + usedBlockCount = blocksPerGroup + } else if g*blocksPerGroup < validDataSize { + for j := uint32(0); j < validDataSize-g*blocksPerGroup; j++ { + b[j/8] |= 1 << (j % 8) + usedBlockCount++ + } + } + if g == 0 { + // Unused group descriptor blocks should be cleared. + for j := 1 + usedGdBlocks; j < 1+w.gdBlocks; j++ { + b[j/8] &^= 1 << (j % 8) + usedBlockCount-- + } + } + if g == groups-1 && diskSize%blocksPerGroup != 0 { + // Blocks that aren't present in the disk should be marked as + // allocated. + for j := diskSize % blocksPerGroup; j < blocksPerGroup; j++ { + b[j/8] |= 1 << (j % 8) + usedBlockCount++ + } + } + // Inode bitmap + for j := uint32(0); j < inodesPerGroup; j++ { + ino := format.InodeNumber(1 + g*inodesPerGroup + j) + inode := w.getInode(ino) + if ino < inodeFirst || inode != nil { + b[blockSize+j/8] |= 1 << (j % 8) + usedInodeCount++ + } + if inode != nil && inode.Mode&format.TypeMask == format.S_IFDIR { + dirCount++ + } + } + _, err := w.write(b[:]) + if err != nil { + return err + } + gds[g] = format.GroupDescriptor{ + BlockBitmapLow: bitmapOffset + 2*g, + InodeBitmapLow: bitmapOffset + 2*g + 1, + InodeTableLow: inodeTableOffset + g*inodeTableSizePerGroup, + UsedDirsCountLow: dirCount, + FreeInodesCountLow: uint16(inodesPerGroup) - usedInodeCount, + FreeBlocksCountLow: blocksPerGroup - usedBlockCount, + } + + totalUsedBlocks += uint32(usedBlockCount) + totalUsedInodes += uint32(usedInodeCount) + } + + // Zero up to the disk size. 
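+ // The blocks after the bitmaps up to diskSize hold no data; zero-fill them
+ // so the image covers the full reported block count.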
+ _, err = w.zero(int64(diskSize-bitmapOffset-bitmapSize) * blockSize) + if err != nil { + return err + } + + // Write the block descriptors + w.seekBlock(1) + if w.err != nil { + return w.err + } + err = binary.Write(w.bw, binary.LittleEndian, gds) + if err != nil { + return err + } + + // Write the super block + var blk [blockSize]byte + b := bytes.NewBuffer(blk[:1024]) + sb := &format.SuperBlock{ + InodesCount: inodesPerGroup * groups, + BlocksCountLow: diskSize, + FreeBlocksCountLow: blocksPerGroup*groups - totalUsedBlocks, + FreeInodesCount: inodesPerGroup*groups - totalUsedInodes, + FirstDataBlock: 0, + LogBlockSize: 2, // 2^(10 + 2) + LogClusterSize: 2, + BlocksPerGroup: blocksPerGroup, + ClustersPerGroup: blocksPerGroup, + InodesPerGroup: inodesPerGroup, + Magic: format.SuperBlockMagic, + State: 1, // cleanly unmounted + Errors: 1, // continue on error? + CreatorOS: 0, // Linux + RevisionLevel: 1, // dynamic inode sizes + FirstInode: inodeFirst, + LpfInode: inodeLostAndFound, + InodeSize: inodeSize, + FeatureCompat: format.CompatSparseSuper2 | format.CompatExtAttr, + FeatureIncompat: format.IncompatFiletype | format.IncompatExtents | format.IncompatFlexBg, + FeatureRoCompat: format.RoCompatLargeFile | format.RoCompatHugeFile | format.RoCompatExtraIsize | format.RoCompatReadonly, + MinExtraIsize: extraIsize, + WantExtraIsize: extraIsize, + LogGroupsPerFlex: 31, + } + if w.supportInlineData { + sb.FeatureIncompat |= format.IncompatInlineData + } + binary.Write(b, binary.LittleEndian, sb) + w.seekBlock(0) + if _, err := w.write(blk[:]); err != nil { + return err + } + w.seekBlock(diskSize) + return w.err +} diff --git a/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact_test.go b/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact_test.go new file mode 100644 index 00000000..b7b2a30b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/compact_test.go @@ -0,0 +1,355 @@ +package compactext4 + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "os" + "strings" + "testing" + "time" + + "github.com/Microsoft/hcsshim/ext4/internal/format" +) + +type testFile struct { + Path string + File *File + Data []byte + DataSize int64 + Link string + ExpectError bool +} + +var ( + data []byte + name string +) + +func init() { + data = make([]byte, blockSize*2) + for i := range data { + data[i] = uint8(i) + } + + nameb := make([]byte, 300) + for i := range nameb { + nameb[i] = byte('0' + i%10) + } + name = string(nameb) +} + +type largeData struct { + pos int64 +} + +func (d *largeData) Read(b []byte) (int, error) { + p := d.pos + var pb [8]byte + for i := range b { + binary.LittleEndian.PutUint64(pb[:], uint64(p+int64(i))) + b[i] = pb[i%8] + } + p += int64(len(b)) + return len(b), nil +} + +func (tf *testFile) Reader() io.Reader { + if tf.DataSize != 0 { + return io.LimitReader(&largeData{}, tf.DataSize) + } + return bytes.NewReader(tf.Data) +} + +func createTestFile(t *testing.T, w *Writer, tf testFile) { + var err error + if tf.File != nil { + tf.File.Size = int64(len(tf.Data)) + if tf.File.Size == 0 { + tf.File.Size = tf.DataSize + } + err = w.Create(tf.Path, tf.File) + } else { + err = w.Link(tf.Link, tf.Path) + } + if tf.ExpectError && err == nil { + t.Errorf("%s: expected error", tf.Path) + } else if !tf.ExpectError && err != nil { + t.Error(err) + } else { + _, err := io.Copy(w, tf.Reader()) + if err != nil { + t.Error(err) + } + } +} + +func expectedMode(f *File) uint16 { + switch f.Mode & format.TypeMask { + case 0: + 
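+ // A zero type defaults to a regular file, matching makeInode.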
return f.Mode | S_IFREG + case S_IFLNK: + return f.Mode | 0777 + default: + return f.Mode + } +} + +func expectedSize(f *File) int64 { + switch f.Mode & format.TypeMask { + case 0, S_IFREG: + return f.Size + case S_IFLNK: + return int64(len(f.Linkname)) + default: + return 0 + } +} + +func xattrsEqual(x1, x2 map[string][]byte) bool { + if len(x1) != len(x2) { + return false + } + for name, value := range x1 { + if !bytes.Equal(x2[name], value) { + return false + } + } + return true +} + +func fileEqual(f1, f2 *File) bool { + return f1.Linkname == f2.Linkname && + expectedSize(f1) == expectedSize(f2) && + expectedMode(f1) == expectedMode(f2) && + f1.Uid == f2.Uid && + f1.Gid == f2.Gid && + f1.Atime.Equal(f2.Atime) && + f1.Ctime.Equal(f2.Ctime) && + f1.Mtime.Equal(f2.Mtime) && + f1.Crtime.Equal(f2.Crtime) && + f1.Devmajor == f2.Devmajor && + f1.Devminor == f2.Devminor && + xattrsEqual(f1.Xattrs, f2.Xattrs) +} + +func runTestsOnFiles(t *testing.T, testFiles []testFile, opts ...Option) { + image := "testfs.img" + imagef, err := os.Create(image) + if err != nil { + t.Fatal(err) + } + defer os.Remove(image) + defer imagef.Close() + + w := NewWriter(imagef, opts...) + for _, tf := range testFiles { + createTestFile(t, w, tf) + if !tf.ExpectError && tf.File != nil { + f, err := w.Stat(tf.Path) + if err != nil { + if !strings.Contains(err.Error(), "cannot retrieve") { + t.Error(err) + } + } else if !fileEqual(f, tf.File) { + t.Errorf("%s: stat mismatch: %#v %#v", tf.Path, tf.File, f) + } + } + } + + if t.Failed() { + return + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + fsck(t, image) + + mountPath := "testmnt" + + if mountImage(t, image, mountPath) { + defer unmountImage(t, mountPath) + validated := make(map[string]*testFile) + for i := range testFiles { + tf := testFiles[len(testFiles)-i-1] + if validated[tf.Link] != nil { + // The link target was subsequently replaced. Find the + // earlier instance. 
+ for j := range testFiles[:len(testFiles)-i-1] { + otf := testFiles[j] + if otf.Path == tf.Link && !otf.ExpectError { + tf = otf + break + } + } + } + if !tf.ExpectError && validated[tf.Path] == nil { + verifyTestFile(t, mountPath, tf) + validated[tf.Path] = &tf + } + } + } +} + +func TestBasic(t *testing.T) { + now := time.Now() + testFiles := []testFile{ + {Path: "empty", File: &File{Mode: 0644}}, + {Path: "small", File: &File{Mode: 0644}, Data: data[:40]}, + {Path: "time", File: &File{Atime: now, Ctime: now.Add(time.Second), Mtime: now.Add(time.Hour)}}, + {Path: "block_1", File: &File{Mode: 0644}, Data: data[:blockSize]}, + {Path: "block_2", File: &File{Mode: 0644}, Data: data[:blockSize*2]}, + {Path: "symlink", File: &File{Linkname: "block_1", Mode: format.S_IFLNK}}, + {Path: "symlink_59", File: &File{Linkname: name[:59], Mode: format.S_IFLNK}}, + {Path: "symlink_60", File: &File{Linkname: name[:60], Mode: format.S_IFLNK}}, + {Path: "symlink_120", File: &File{Linkname: name[:120], Mode: format.S_IFLNK}}, + {Path: "symlink_300", File: &File{Linkname: name[:300], Mode: format.S_IFLNK}}, + {Path: "dir", File: &File{Mode: format.S_IFDIR | 0755}}, + {Path: "dir/fifo", File: &File{Mode: format.S_IFIFO}}, + {Path: "dir/sock", File: &File{Mode: format.S_IFSOCK}}, + {Path: "dir/blk", File: &File{Mode: format.S_IFBLK, Devmajor: 0x5678, Devminor: 0x1234}}, + {Path: "dir/chr", File: &File{Mode: format.S_IFCHR, Devmajor: 0x5678, Devminor: 0x1234}}, + {Path: "dir/hard_link", Link: "small"}, + } + + runTestsOnFiles(t, testFiles) +} + +func TestLargeDirectory(t *testing.T) { + testFiles := []testFile{ + {Path: "bigdir", File: &File{Mode: format.S_IFDIR | 0755}}, + } + for i := 0; i < 50000; i++ { + testFiles = append(testFiles, testFile{ + Path: fmt.Sprintf("bigdir/%d", i), File: &File{Mode: 0644}, + }) + } + + runTestsOnFiles(t, testFiles) +} + +func TestInlineData(t *testing.T) { + testFiles := []testFile{ + {Path: "inline_30", File: &File{Mode: 0644}, Data: data[:30]}, + {Path: "inline_60", File: &File{Mode: 0644}, Data: data[:60]}, + {Path: "inline_120", File: &File{Mode: 0644}, Data: data[:120]}, + {Path: "inline_full", File: &File{Mode: 0644}, Data: data[:inlineDataSize]}, + {Path: "block_min", File: &File{Mode: 0644}, Data: data[:inlineDataSize+1]}, + } + + runTestsOnFiles(t, testFiles, InlineData) +} + +func TestXattrs(t *testing.T) { + testFiles := []testFile{ + {Path: "withsmallxattrs", + File: &File{ + Mode: format.S_IFREG | 0644, + Xattrs: map[string][]byte{ + "user.foo": []byte("test"), + "user.bar": []byte("test2"), + }, + }, + }, + {Path: "withlargexattrs", + File: &File{ + Mode: format.S_IFREG | 0644, + Xattrs: map[string][]byte{ + "user.foo": data[:100], + "user.bar": data[:50], + }, + }, + }, + } + runTestsOnFiles(t, testFiles) +} + +func TestReplace(t *testing.T) { + testFiles := []testFile{ + {Path: "lost+found", ExpectError: true, File: &File{}}, // can't change type + {Path: "lost+found", File: &File{Mode: format.S_IFDIR | 0777}}, + + {Path: "dir", File: &File{Mode: format.S_IFDIR | 0777}}, + {Path: "dir/file", File: &File{}}, + {Path: "dir", File: &File{Mode: format.S_IFDIR | 0700}}, + + {Path: "file", File: &File{}}, + {Path: "file", File: &File{Mode: 0600}}, + {Path: "file2", File: &File{}}, + {Path: "link", Link: "file2"}, + {Path: "file2", File: &File{Mode: 0600}}, + + {Path: "nolinks", File: &File{}}, + {Path: "nolinks", ExpectError: true, Link: "file"}, // would orphan nolinks + + {Path: "onelink", File: &File{}}, + {Path: "onelink2", Link: "onelink"}, + {Path: "onelink", 
Link: "file"}, + + {Path: "", ExpectError: true, File: &File{}}, + {Path: "", ExpectError: true, Link: "file"}, + {Path: "", File: &File{Mode: format.S_IFDIR | 0777}}, + + {Path: "smallxattr", File: &File{Xattrs: map[string][]byte{"user.foo": data[:4]}}}, + {Path: "smallxattr", File: &File{Xattrs: map[string][]byte{"user.foo": data[:8]}}}, + + {Path: "smallxattr_delete", File: &File{Xattrs: map[string][]byte{"user.foo": data[:4]}}}, + {Path: "smallxattr_delete", File: &File{}}, + + {Path: "largexattr", File: &File{Xattrs: map[string][]byte{"user.small": data[:8], "user.foo": data[:200]}}}, + {Path: "largexattr", File: &File{Xattrs: map[string][]byte{"user.small": data[:12], "user.foo": data[:400]}}}, + + {Path: "largexattr", File: &File{Xattrs: map[string][]byte{"user.foo": data[:200]}}}, + {Path: "largexattr_delete", File: &File{}}, + } + runTestsOnFiles(t, testFiles) +} + +func TestTime(t *testing.T) { + now := time.Now() + now2 := fsTimeToTime(timeToFsTime(now)) + if now.UnixNano() != now2.UnixNano() { + t.Fatalf("%s != %s", now, now2) + } +} + +func TestLargeFile(t *testing.T) { + testFiles := []testFile{ + {Path: "small", File: &File{}, DataSize: 1024 * 1024}, // can't change type + {Path: "medium", File: &File{}, DataSize: 200 * 1024 * 1024}, // can't change type + {Path: "large", File: &File{}, DataSize: 600 * 1024 * 1024}, // can't change type + } + runTestsOnFiles(t, testFiles) +} + +func TestFileLinkLimit(t *testing.T) { + testFiles := []testFile{ + {Path: "file", File: &File{}}, + } + for i := 0; i < format.MaxLinks; i++ { + testFiles = append(testFiles, testFile{Path: fmt.Sprintf("link%d", i), Link: "file"}) + } + testFiles[len(testFiles)-1].ExpectError = true + runTestsOnFiles(t, testFiles) +} + +func TestDirLinkLimit(t *testing.T) { + testFiles := []testFile{ + {Path: "dir", File: &File{Mode: S_IFDIR}}, + } + for i := 0; i < format.MaxLinks-1; i++ { + testFiles = append(testFiles, testFile{Path: fmt.Sprintf("dir/%d", i), File: &File{Mode: S_IFDIR}}) + } + testFiles[len(testFiles)-1].ExpectError = true + runTestsOnFiles(t, testFiles) +} + +func TestLargeDisk(t *testing.T) { + testFiles := []testFile{ + {Path: "file", File: &File{}}, + } + runTestsOnFiles(t, testFiles, MaximumDiskSize(maxMaxDiskSize)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/verify_linux_test.go b/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/verify_linux_test.go new file mode 100644 index 00000000..86ece03b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/ext4/internal/compactext4/verify_linux_test.go @@ -0,0 +1,248 @@ +package compactext4 + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path" + "syscall" + "testing" + "time" + "unsafe" + + "github.com/Microsoft/hcsshim/ext4/internal/format" +) + +func timeEqual(ts syscall.Timespec, t time.Time) bool { + sec, nsec := t.Unix(), t.Nanosecond() + if t.IsZero() { + sec, nsec = 0, 0 + } + return ts.Sec == sec && int(ts.Nsec) == nsec +} + +func expectedDevice(f *File) uint64 { + return uint64(f.Devminor&0xff | f.Devmajor<<8 | (f.Devminor&0xffffff00)<<12) +} + +func llistxattr(path string, b []byte) (int, error) { + pathp := syscall.StringBytePtr(path) + var p unsafe.Pointer + if len(b) > 0 { + p = unsafe.Pointer(&b[0]) + } + r, _, e := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathp)), uintptr(p), uintptr(len(b))) + if e != 0 { + return 0, &os.PathError{Path: path, Op: "llistxattr", Err: syscall.Errno(e)} + } + return int(r), nil +} + +func lgetxattr(path string, name string, 
b []byte) (int, error) { + pathp := syscall.StringBytePtr(path) + namep := syscall.StringBytePtr(name) + var p unsafe.Pointer + if len(b) > 0 { + p = unsafe.Pointer(&b[0]) + } + r, _, e := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathp)), uintptr(unsafe.Pointer(namep)), uintptr(p), uintptr(len(b)), 0, 0) + if e != 0 { + return 0, &os.PathError{Path: path, Op: "lgetxattr", Err: syscall.Errno(e)} + } + return int(r), nil +} + +func readXattrs(path string) (map[string][]byte, error) { + xattrs := make(map[string][]byte) + var buf [4096]byte + var buf2 [4096]byte + b := buf[:] + n, err := llistxattr(path, b) + if err != nil { + return nil, err + } + b = b[:n] + for len(b) != 0 { + nn := bytes.IndexByte(b, 0) + name := string(b[:nn]) + b = b[nn+1:] + vn, err := lgetxattr(path, name, buf2[:]) + if err != nil { + return nil, err + } + value := buf2[:vn] + xattrs[name] = value + } + return xattrs, nil +} + +func streamEqual(r1, r2 io.Reader) (bool, error) { + var b [4096]byte + var b2 [4096]byte + for { + n, err := r1.Read(b[:]) + if n == 0 { + if err == io.EOF { + break + } + if err == nil { + continue + } + return false, err + } + _, err = io.ReadFull(r2, b2[:n]) + if err == io.EOF || err == io.ErrUnexpectedEOF { + return false, nil + } + if err != nil { + return false, err + } + if !bytes.Equal(b[n:], b2[n:]) { + return false, nil + } + } + // Check the tail of r2 + _, err := r2.Read(b[:1]) + if err == nil { + return false, nil + } + if err != io.EOF { + return false, err + } + return true, nil +} + +func verifyTestFile(t *testing.T, mountPath string, tf testFile) { + name := path.Join(mountPath, tf.Path) + fi, err := os.Lstat(name) + if err != nil { + t.Error(err) + return + } + st := fi.Sys().(*syscall.Stat_t) + if tf.File != nil { + if st.Mode != uint32(expectedMode(tf.File)) || + st.Uid != tf.File.Uid || + st.Gid != tf.File.Gid || + (!fi.IsDir() && st.Size != expectedSize(tf.File)) || + st.Rdev != expectedDevice(tf.File) || + !timeEqual(st.Atim, tf.File.Atime) || + !timeEqual(st.Mtim, tf.File.Mtime) || + !timeEqual(st.Ctim, tf.File.Ctime) { + + t.Errorf("%s: stat mismatch, expected: %#v got: %#v", tf.Path, tf.File, st) + } + + xattrs, err := readXattrs(name) + if err != nil { + t.Error(err) + } else if !xattrsEqual(xattrs, tf.File.Xattrs) { + t.Errorf("%s: xattr mismatch, expected: %#v got: %#v", tf.Path, tf.File.Xattrs, xattrs) + } + + switch tf.File.Mode & format.TypeMask { + case S_IFREG: + if f, err := os.Open(name); err != nil { + t.Error(err) + } else { + same, err := streamEqual(f, tf.Reader()) + if err != nil { + t.Error(err) + } else if !same { + t.Errorf("%s: data mismatch", tf.Path) + } + f.Close() + } + case S_IFLNK: + if link, err := os.Readlink(name); err != nil { + t.Error(err) + } else if link != tf.File.Linkname { + t.Errorf("%s: link mismatch, expected: %s got: %s", tf.Path, tf.File.Linkname, link) + } + } + } else { + lfi, err := os.Lstat(path.Join(mountPath, tf.Link)) + if err != nil { + t.Error(err) + return + } + + lst := lfi.Sys().(*syscall.Stat_t) + if lst.Ino != st.Ino { + t.Errorf("%s: hard link mismatch with %s, expected inode: %d got inode: %d", tf.Path, tf.Link, lst.Ino, st.Ino) + } + } +} + +type capHeader struct { + version uint32 + pid int +} + +type capData struct { + effective uint32 + permitted uint32 + inheritable uint32 +} + +const CAP_SYS_ADMIN = 21 + +type caps struct { + hdr capHeader + data [2]capData +} + +func getCaps() (caps, error) { + var c caps + + // Get capability version + if _, _, errno := 
syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(nil)), 0); errno != 0 { + return c, fmt.Errorf("SYS_CAPGET: %v", errno) + } + + // Get current capabilities + if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(&c.data[0])), 0); errno != 0 { + return c, fmt.Errorf("SYS_CAPGET: %v", errno) + } + + return c, nil +} + +func mountImage(t *testing.T, image string, mountPath string) bool { + caps, err := getCaps() + if err != nil || caps.data[0].effective&(1<[%s] Request : %s", method, path, request) + + err := _hnsCall(method, path, request, &responseBuffer) + if err != nil { + return hcserror.New(err, "hnsCall ", "") + } + response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) + + hnsresponse := &hnsResponse{} + if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { + return err + } + + if !hnsresponse.Success { + return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) + } + + if len(hnsresponse.Output) == 0 { + return nil + } + + logrus.Debugf("Network Response : %s", hnsresponse.Output) + err = json.Unmarshal(hnsresponse.Output, returnResponse) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go new file mode 100644 index 00000000..cff68e13 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go @@ -0,0 +1,335 @@ +package hcn + +import ( + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// LoadBalancerPortMapping is associated with HostComputeLoadBalancer +type LoadBalancerPortMapping struct { + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + Flags LoadBalancerPortMappingFlags `json:",omitempty"` +} + +// HostComputeLoadBalancer represents software load balancer. +type HostComputeLoadBalancer struct { + Id string `json:"ID,omitempty"` + HostComputeEndpoints []string `json:",omitempty"` + SourceVIP string `json:",omitempty"` + FrontendVIPs []string `json:",omitempty"` + PortMappings []LoadBalancerPortMapping `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` + Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn +} + +//LoadBalancerFlags modify settings for a loadbalancer. +type LoadBalancerFlags uint32 + +var ( + // LoadBalancerFlagsNone is the default. + LoadBalancerFlagsNone LoadBalancerFlags = 0 + // LoadBalancerFlagsDSR enables Direct Server Return (DSR) + LoadBalancerFlagsDSR LoadBalancerFlags = 1 +) + +// LoadBalancerPortMappingFlags are special settings on a loadbalancer. +type LoadBalancerPortMappingFlags uint32 + +var ( + // LoadBalancerPortMappingFlagsNone is the default. + LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags + // LoadBalancerPortMappingFlagsILB enables internal loadbalancing. + LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1 + // LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host. + LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2 + // LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP. 
+ LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4 + // LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP. + LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8 +) + +func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { + // Open loadBalancer. + var ( + loadBalancerHandle hcnLoadBalancer + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) + if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Query loadBalancer. + hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close loadBalancer. + hr = hcnCloseLoadBalancer(loadBalancerHandle) + if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeLoadBalancer + var outputLoadBalancer HostComputeLoadBalancer + if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { + return nil, err + } + return &outputLoadBalancer, nil +} + +func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) { + // Enumerate all LoadBalancer Guids + var ( + resultBuffer *uint16 + loadBalancerBuffer *uint16 + ) + hr := hcnEnumerateLoadBalancers(query, &loadBalancerBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateLoadBalancers", hr, resultBuffer); err != nil { + return nil, err + } + + loadBalancers := interop.ConvertAndFreeCoTaskMemString(loadBalancerBuffer) + var loadBalancerIds []guid.GUID + if err := json.Unmarshal([]byte(loadBalancers), &loadBalancerIds); err != nil { + return nil, err + } + + var outputLoadBalancers []HostComputeLoadBalancer + for _, loadBalancerGuid := range loadBalancerIds { + loadBalancer, err := getLoadBalancer(loadBalancerGuid, query) + if err != nil { + return nil, err + } + outputLoadBalancers = append(outputLoadBalancers, *loadBalancer) + } + return outputLoadBalancers, nil +} + +func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { + // Create new loadBalancer. + var ( + loadBalancerHandle hcnLoadBalancer + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + loadBalancerGuid := guid.GUID{} + hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer) + if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Query loadBalancer. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close loadBalancer. 
+ hr = hcnCloseLoadBalancer(loadBalancerHandle) + if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeLoadBalancer + var outputLoadBalancer HostComputeLoadBalancer + if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { + return nil, err + } + return &outputLoadBalancer, nil +} + +func modifyLoadBalancer(loadBalancerId string, settings string) (*HostComputeLoadBalancer, error) { + loadBalancerGuid := guid.FromString(loadBalancerId) + // Open loadBalancer. + var ( + loadBalancerHandle hcnLoadBalancer + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) + if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Modify loadBalancer. + hr = hcnModifyLoadBalancer(loadBalancerHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Query loadBalancer. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close loadBalancer. + hr = hcnCloseLoadBalancer(loadBalancerHandle) + if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { + return nil, err + } + // Convert output to LoadBalancer + var outputLoadBalancer HostComputeLoadBalancer + if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { + return nil, err + } + return &outputLoadBalancer, nil +} + +func deleteLoadBalancer(loadBalancerId string) error { + loadBalancerGuid := guid.FromString(loadBalancerId) + var resultBuffer *uint16 + hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListLoadBalancers makes a call to list all available loadBalancers. +func ListLoadBalancers() ([]HostComputeLoadBalancer, error) { + hcnQuery := defaultQuery() + loadBalancers, err := ListLoadBalancersQuery(hcnQuery) + if err != nil { + return nil, err + } + return loadBalancers, nil +} + +// ListLoadBalancersQuery makes a call to query the list of available loadBalancers. +func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + loadBalancers, err := enumerateLoadBalancers(string(queryJson)) + if err != nil { + return nil, err + } + return loadBalancers, nil +} + +// GetLoadBalancerByID returns the LoadBalancer specified by Id. +func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": loadBalancerId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + loadBalancers, err := ListLoadBalancersQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(loadBalancers) == 0 { + return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} + } + return &loadBalancers[0], err +} + +// Create LoadBalancer. 
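+// The loadbalancer is marshalled to JSON and passed to HCN; the object that
+// HCN reports back is returned.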
+func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::Create id=%s", loadBalancer.Id) + + jsonString, err := json.Marshal(loadBalancer) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString) + loadBalancer, hcnErr := createLoadBalancer(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return loadBalancer, nil +} + +// Delete LoadBalancer. +func (loadBalancer *HostComputeLoadBalancer) Delete() error { + logrus.Debugf("hcn::HostComputeLoadBalancer::Delete id=%s", loadBalancer.Id) + + if err := deleteLoadBalancer(loadBalancer.Id); err != nil { + return err + } + return nil +} + +// AddEndpoint add an endpoint to a LoadBalancer +func (loadBalancer *HostComputeLoadBalancer) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::AddEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) + + err := loadBalancer.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) + + return loadBalancer.Create() +} + +// RemoveEndpoint removes an endpoint from a LoadBalancer +func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::RemoveEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) + + err := loadBalancer.Delete() + if err != nil { + return nil, err + } + + // Create a list of all the endpoints besides the one being removed + var endpoints []string + for _, endpointReference := range loadBalancer.HostComputeEndpoints { + if endpointReference == endpoint.Id { + continue + } + endpoints = append(endpoints, endpointReference) + } + loadBalancer.HostComputeEndpoints = endpoints + return loadBalancer.Create() +} + +// AddLoadBalancer for the specified endpoints +func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort) + + loadBalancer := &HostComputeLoadBalancer{ + SourceVIP: sourceVIP, + PortMappings: []LoadBalancerPortMapping{ + { + Protocol: uint32(protocol), + InternalPort: internalPort, + ExternalPort: externalPort, + Flags: portMappingFlags, + }, + }, + FrontendVIPs: frontendVIPs, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + Flags: flags, + } + + for _, endpoint := range endpoints { + loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) + } + + return loadBalancer.Create() +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer_test.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer_test.go new file mode 100644 index 00000000..785e2659 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer_test.go @@ -0,0 +1,260 @@ +// +build integration + +package hcn + +import ( + "encoding/json" + "fmt" + 
"testing" +) + +func TestCreateDeleteLoadBalancer(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + loadBalancer, err := HcnCreateTestLoadBalancer(endpoint) + if err != nil { + t.Fatal(err) + } + jsonString, err := json.Marshal(loadBalancer) + if err != nil { + t.Fatal(err) + } + fmt.Printf("LoadBalancer JSON:\n%s \n", jsonString) + + err = loadBalancer.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestGetLoadBalancerById(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + loadBalancer, err := HcnCreateTestLoadBalancer(endpoint) + if err != nil { + t.Fatal(err) + } + foundLB, err := GetLoadBalancerByID(loadBalancer.Id) + if err != nil { + t.Fatal(err) + } + if foundLB == nil { + t.Fatalf("No loadBalancer found") + } + err = loadBalancer.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestListLoadBalancer(t *testing.T) { + _, err := ListLoadBalancers() + if err != nil { + t.Fatal(err) + } +} + +func TestLoadBalancerAddRemoveEndpoint(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + loadBalancer, err := HcnCreateTestLoadBalancer(endpoint) + if err != nil { + t.Fatal(err) + } + + secondEndpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + updatedLB, err := loadBalancer.AddEndpoint(secondEndpoint) + if err != nil { + t.Fatal(err) + } + + if len(updatedLB.HostComputeEndpoints) != 2 { + t.Fatalf("Endpoint not added to loadBalancer") + } + updatedLB, err = loadBalancer.RemoveEndpoint(secondEndpoint) + if err != nil { + t.Fatal(err) + } + if len(updatedLB.HostComputeEndpoints) != 1 { + t.Fatalf("Endpoint not removed from loadBalancer") + } + + err = loadBalancer.Delete() + if err != nil { + t.Fatal(err) + } + err = secondEndpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestAddLoadBalancer(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + + loadBalancer, err := AddLoadBalancer([]HostComputeEndpoint{*endpoint}, LoadBalancerFlagsNone, LoadBalancerPortMappingFlagsNone, "10.0.0.1", []string{"1.1.1.2", "1.1.1.3"}, 6, 8080, 80) + if err != nil { + t.Fatal(err) + } + foundLB, err := GetLoadBalancerByID(loadBalancer.Id) + if err != nil { + t.Fatal(err) + } + if foundLB == nil { + t.Fatal(fmt.Errorf("No loadBalancer found")) + } + + err = loadBalancer.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestAddDSRLoadBalancer(t *testing.T) { + network, err := CreateTestOverlayNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + + 
portMappings := LoadBalancerPortMappingFlagsPreserveDIP | LoadBalancerPortMappingFlagsUseMux + loadBalancer, err := AddLoadBalancer([]HostComputeEndpoint{*endpoint}, LoadBalancerFlagsDSR, portMappings, "10.0.0.1", []string{"1.1.1.2", "1.1.1.3"}, 6, 8080, 80) + if err != nil { + t.Fatal(err) + } + foundLB, err := GetLoadBalancerByID(loadBalancer.Id) + if err != nil { + t.Fatal(err) + } + if foundLB == nil { + t.Fatal(fmt.Errorf("No loadBalancer found")) + } + if foundLB.Flags != 1 { + t.Fatal(fmt.Errorf("IsDSR is not set")) + } + + foundFlags := foundLB.PortMappings[0].Flags + if foundFlags&LoadBalancerPortMappingFlagsUseMux == 0 { + t.Fatal(fmt.Errorf("UseMux is not set")) + } + if foundFlags&LoadBalancerPortMappingFlagsPreserveDIP == 0 { + t.Fatal(fmt.Errorf("PreserveDIP is not set")) + } + + err = loadBalancer.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestAddILBLoadBalancer(t *testing.T) { + network, err := CreateTestOverlayNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + + loadBalancer, err := AddLoadBalancer([]HostComputeEndpoint{*endpoint}, LoadBalancerFlagsNone, LoadBalancerPortMappingFlagsILB, "10.0.0.1", []string{"1.1.1.2", "1.1.1.3"}, 6, 8080, 80) + if err != nil { + t.Fatal(err) + } + foundLB, err := GetLoadBalancerByID(loadBalancer.Id) + if err != nil { + t.Fatal(err) + } + if foundLB == nil { + t.Fatal(fmt.Errorf("No loadBalancer found")) + } + + foundFlags := foundLB.PortMappings[0].Flags + if foundFlags&LoadBalancerPortMappingFlagsILB == 0 { + t.Fatal(fmt.Errorf("Loadbalancer is not ILB")) + } + + err = loadBalancer.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go new file mode 100644 index 00000000..6dbef4f2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go @@ -0,0 +1,424 @@ +package hcn + +import ( + "encoding/json" + "os" + "syscall" + + icni "github.com/Microsoft/hcsshim/internal/cni" + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/regstate" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/sirupsen/logrus" +) + +// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace. +type NamespaceResourceEndpoint struct { + Id string `json:"ID,"` +} + +// NamespaceResourceContainer represents a Container attached to a Namespace. +type NamespaceResourceContainer struct { + Id string `json:"ID,"` +} + +// NamespaceResourceType determines whether the Namespace resource is a Container or Endpoint. +type NamespaceResourceType string + +var ( + // NamespaceResourceTypeContainer are contianers associated with a Namespace. + NamespaceResourceTypeContainer NamespaceResourceType = "Container" + // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace. 
+ NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint"
+)
+
+// NamespaceResource is associated with a namespace.
+type NamespaceResource struct {
+ Type NamespaceResourceType `json:","` // Container, Endpoint
+ Data json.RawMessage `json:","`
+}
+
+// NamespaceType determines whether the Namespace is for a Host or Guest.
+type NamespaceType string
+
+var (
+ // NamespaceTypeHost are host namespaces.
+ NamespaceTypeHost NamespaceType = "Host"
+ // NamespaceTypeHostDefault are host namespaces in the default compartment.
+ NamespaceTypeHostDefault NamespaceType = "HostDefault"
+ // NamespaceTypeGuest are guest namespaces.
+ NamespaceTypeGuest NamespaceType = "Guest"
+ // NamespaceTypeGuestDefault are guest namespaces in the default compartment.
+ NamespaceTypeGuestDefault NamespaceType = "GuestDefault"
+)
+
+// HostComputeNamespace represents a namespace (AKA compartment).
+type HostComputeNamespace struct {
+ Id string `json:"ID,omitempty"`
+ NamespaceId uint32 `json:",omitempty"`
+ Type NamespaceType `json:",omitempty"` // Host, HostDefault, Guest, GuestDefault
+ Resources []NamespaceResource `json:",omitempty"`
+ SchemaVersion SchemaVersion `json:",omitempty"`
+}
+
+// ModifyNamespaceSettingRequest is the structure used to send a request to modify a namespace.
+// Used to add/remove endpoints and containers to/from a namespace.
+type ModifyNamespaceSettingRequest struct {
+ ResourceType NamespaceResourceType `json:",omitempty"` // Container, Endpoint
+ RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh
+ Settings json.RawMessage `json:",omitempty"`
+}
+
+func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) {
+ // Open namespace.
+ var (
+ namespaceHandle hcnNamespace
+ resultBuffer *uint16
+ propertiesBuffer *uint16
+ )
+ hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer)
+ if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil {
+ return nil, err
+ }
+ // Query namespace.
+ hr = hcnQueryNamespaceProperties(namespaceHandle, query, &propertiesBuffer, &resultBuffer)
+ if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil {
+ return nil, err
+ }
+ properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer)
+ // Close namespace. 
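+ // The properties string was already copied out of the COM buffer by
+ // ConvertAndFreeCoTaskMemString, so the handle can be released before the
+ // JSON is unmarshalled.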
+ hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNamespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func enumerateNamespaces(query string) ([]HostComputeNamespace, error) { + // Enumerate all Namespace Guids + var ( + resultBuffer *uint16 + namespaceBuffer *uint16 + ) + hr := hcnEnumerateNamespaces(query, &namespaceBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateNamespaces", hr, resultBuffer); err != nil { + return nil, err + } + + namespaces := interop.ConvertAndFreeCoTaskMemString(namespaceBuffer) + var namespaceIds []guid.GUID + if err := json.Unmarshal([]byte(namespaces), &namespaceIds); err != nil { + return nil, err + } + + var outputNamespaces []HostComputeNamespace + for _, namespaceGuid := range namespaceIds { + namespace, err := getNamespace(namespaceGuid, query) + if err != nil { + return nil, err + } + outputNamespaces = append(outputNamespaces, *namespace) + } + return outputNamespaces, nil +} + +func createNamespace(settings string) (*HostComputeNamespace, error) { + // Create new namespace. + var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + namespaceGuid := guid.GUID{} + hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. + hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNamespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { + namespaceGuid := guid.FromString(namespaceId) + // Open namespace. + var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Modify namespace. + hr = hcnModifyNamespace(namespaceHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. 
+ hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to Namespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func deleteNamespace(namespaceId string) error { + namespaceGuid := guid.FromString(namespaceId) + var resultBuffer *uint16 + hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListNamespaces makes a call to list all available namespaces. +func ListNamespaces() ([]HostComputeNamespace, error) { + hcnQuery := defaultQuery() + namespaces, err := ListNamespacesQuery(hcnQuery) + if err != nil { + return nil, err + } + return namespaces, nil +} + +// ListNamespacesQuery makes a call to query the list of available namespaces. +func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + namespaces, err := enumerateNamespaces(string(queryJson)) + if err != nil { + return nil, err + } + return namespaces, nil +} + +// GetNamespaceByID returns the Namespace specified by Id. +func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { + return getNamespace(guid.FromString(namespaceId), defaultQueryJson()) +} + +// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id. +func GetNamespaceEndpointIds(namespaceId string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceId) + if err != nil { + return nil, err + } + var endpointsIds []string + for _, resource := range namespace.Resources { + if resource.Type == "Endpoint" { + var endpointResource NamespaceResourceEndpoint + if err := json.Unmarshal([]byte(resource.Data), &endpointResource); err != nil { + return nil, err + } + endpointsIds = append(endpointsIds, endpointResource.Id) + } + } + return endpointsIds, nil +} + +// GetNamespaceContainerIds returns the containers of the Namespace specified by Id. +func GetNamespaceContainerIds(namespaceId string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceId) + if err != nil { + return nil, err + } + var containerIds []string + for _, resource := range namespace.Resources { + if resource.Type == "Container" { + var contaienrResource NamespaceResourceContainer + if err := json.Unmarshal([]byte(resource.Data), &contaienrResource); err != nil { + return nil, err + } + containerIds = append(containerIds, contaienrResource.Id) + } + } + return containerIds, nil +} + +// NewNamespace creates a new Namespace object +func NewNamespace(nsType NamespaceType) *HostComputeNamespace { + return &HostComputeNamespace{ + Type: nsType, + SchemaVersion: V2SchemaVersion(), + } +} + +// Create Namespace. +func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) { + logrus.Debugf("hcn::HostComputeNamespace::Create id=%s", namespace.Id) + + jsonString, err := json.Marshal(namespace) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString) + namespace, hcnErr := createNamespace(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return namespace, nil +} + +// Delete Namespace. 
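+// The namespace ID is passed to the HNS delete call and any error it reports
+// is returned unchanged.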
+func (namespace *HostComputeNamespace) Delete() error { + logrus.Debugf("hcn::HostComputeNamespace::Delete id=%s", namespace.Id) + + if err := deleteNamespace(namespace.Id); err != nil { + return err + } + return nil +} + +// Sync Namespace endpoints with the appropriate sandbox container holding the +// network namespace open. If no sandbox container is found for this namespace +// this method is determined to be a success and will not return an error in +// this case. If the sandbox container is found and a sync is initiated any +// failures will be returned via this method. +// +// This call initiates a sync between endpoints and the matching UtilityVM +// hosting those endpoints. It is safe to call for any `NamespaceType` but +// `NamespaceTypeGuest` is the only case when a sync will actually occur. For +// `NamespaceTypeHost` the process container will be automatically synchronized +// when the the endpoint is added via `AddNamespaceEndpoint`. +// +// Note: This method sync's both additions and removals of endpoints from a +// `NamespaceTypeGuest` namespace. +func (namespace *HostComputeNamespace) Sync() error { + logrus.WithField("id", namespace.Id).Debugf("hcs::HostComputeNamespace::Sync") + + // We only attempt a sync for namespace guest. + if namespace.Type != NamespaceTypeGuest { + return nil + } + + // Look in the registry for the key to map from namespace id to pod-id + cfg, err := icni.LoadPersistedNamespaceConfig(namespace.Id) + if err != nil { + if regstate.IsNotFoundError(err) { + return nil + } + return err + } + req := runhcs.VMRequest{ + ID: cfg.ContainerID, + Op: runhcs.OpSyncNamespace, + } + shimPath := runhcs.VMPipePath(cfg.HostUniqueID) + if err := runhcs.IssueVMRequest(shimPath, &req); err != nil { + // The shim is likey gone. Simply ignore the sync as if it didn't exist. + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { + // Remove the reg key there is no point to try again + cfg.Remove() + return nil + } + f := map[string]interface{}{ + "id": namespace.Id, + "container-id": cfg.ContainerID, + } + logrus.WithFields(f). + WithError(err). + Debugf("hcs::HostComputeNamespace::Sync failed to connect to shim pipe: '%s'", shimPath) + return err + } + return nil +} + +// ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace. +func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error { + logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId) + + namespaceSettings, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyNamespace(namespaceId, string(namespaceSettings)) + if err != nil { + return err + } + return nil +} + +// AddNamespaceEndpoint adds an endpoint to a Namespace. +func AddNamespaceEndpoint(namespaceId string, endpointId string) error { + logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId) + + mapA := map[string]string{"EndpointId": endpointId} + settingsJson, err := json.Marshal(mapA) + if err != nil { + return err + } + requestMessage := &ModifyNamespaceSettingRequest{ + ResourceType: NamespaceResourceTypeEndpoint, + RequestType: RequestTypeAdd, + Settings: settingsJson, + } + + return ModifyNamespaceSettings(namespaceId, requestMessage) +} + +// RemoveNamespaceEndpoint removes an endpoint from a Namespace. 
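+// The endpoint ID is wrapped in a ModifyNamespaceSettingRequest with
+// RequestTypeRemove and forwarded to ModifyNamespaceSettings.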
+func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error { + logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId) + + mapA := map[string]string{"EndpointId": endpointId} + settingsJson, err := json.Marshal(mapA) + if err != nil { + return err + } + requestMessage := &ModifyNamespaceSettingRequest{ + ResourceType: NamespaceResourceTypeEndpoint, + RequestType: RequestTypeRemove, + Settings: settingsJson, + } + + return ModifyNamespaceSettings(namespaceId, requestMessage) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace_test.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace_test.go new file mode 100644 index 00000000..91e3b20b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace_test.go @@ -0,0 +1,451 @@ +// +build integration + +package hcn + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/Microsoft/hcsshim/internal/cni" + "github.com/Microsoft/hcsshim/internal/guid" +) + +func TestNewNamespace(t *testing.T) { + _ = NewNamespace(NamespaceTypeHost) + _ = NewNamespace(NamespaceTypeHostDefault) + _ = NewNamespace(NamespaceTypeGuest) + _ = NewNamespace(NamespaceTypeGuestDefault) +} + +func TestCreateDeleteNamespace(t *testing.T) { + namespace, err := HcnCreateTestNamespace() + if err != nil { + t.Fatal(err) + } + + jsonString, err := json.Marshal(namespace) + if err != nil { + t.Fatal(err) + } + fmt.Printf("Namespace JSON:\n%s \n", jsonString) + + err = namespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestCreateDeleteNamespaceGuest(t *testing.T) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeGuestDefault, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + hnsNamespace, err := namespace.Create() + if err != nil { + t.Fatal(err) + } + + err = hnsNamespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestGetNamespaceById(t *testing.T) { + namespace, err := HcnCreateTestNamespace() + if err != nil { + t.Fatal(err) + } + + foundNamespace, err := GetNamespaceByID(namespace.Id) + if err != nil { + t.Fatal(err) + } + if foundNamespace == nil { + t.Fatal("No namespace found") + } + + err = namespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestListNamespaces(t *testing.T) { + namespace, err := HcnCreateTestNamespace() + if err != nil { + t.Fatal(err) + } + + foundNamespaces, err := ListNamespaces() + if err != nil { + t.Fatal(err) + } + if len(foundNamespaces) == 0 { + t.Fatal("No Namespaces found") + } + + err = namespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestGetNamespaceEndpointIds(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + namespace, err := HcnCreateTestNamespace() + if err != nil { + t.Fatal(err) + } + + err = endpoint.NamespaceAttach(namespace.Id) + if err != nil { + t.Fatal(err) + } + foundEndpoints, err := GetNamespaceEndpointIds(namespace.Id) + if err != nil { + t.Fatal(err) + } + if len(foundEndpoints) == 0 { + t.Fatal("No Endpoint found") + } + err = endpoint.NamespaceDetach(namespace.Id) + if err != nil { + t.Fatal(err) + } + + err = namespace.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestGetNamespaceContainers(t *testing.T) { + namespace, err := HcnCreateTestNamespace() + if err != nil { + t.Fatal(err) 
+ } + + foundEndpoints, err := GetNamespaceContainerIds(namespace.Id) + if err != nil { + t.Fatal(err) + } + if len(foundEndpoints) != 0 { + t.Fatal("Found containers when none should exist") + } + + err = namespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestAddRemoveNamespaceEndpoint(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + namespace, err := HcnCreateTestNamespace() + if err != nil { + t.Fatal(err) + } + + err = AddNamespaceEndpoint(namespace.Id, endpoint.Id) + if err != nil { + t.Fatal(err) + } + foundEndpoints, err := GetNamespaceEndpointIds(namespace.Id) + if err != nil { + t.Fatal(err) + } + if len(foundEndpoints) == 0 { + t.Fatal("No Endpoint found") + } + err = RemoveNamespaceEndpoint(namespace.Id, endpoint.Id) + if err != nil { + t.Fatal(err) + } + + err = namespace.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestModifyNamespaceSettings(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + endpoint, err := HcnCreateTestEndpoint(network) + if err != nil { + t.Fatal(err) + } + namespace, err := HcnCreateTestNamespace() + if err != nil { + t.Fatal(err) + } + + mapA := map[string]string{"EndpointId": endpoint.Id} + settingsJson, err := json.Marshal(mapA) + if err != nil { + t.Fatal(err) + } + requestMessage := &ModifyNamespaceSettingRequest{ + ResourceType: NamespaceResourceTypeEndpoint, + RequestType: RequestTypeAdd, + Settings: settingsJson, + } + + err = ModifyNamespaceSettings(namespace.Id, requestMessage) + if err != nil { + t.Fatal(err) + } + foundEndpoints, err := GetNamespaceEndpointIds(namespace.Id) + if err != nil { + t.Fatal(err) + } + if len(foundEndpoints) == 0 { + t.Fatal("No Endpoint found") + } + err = RemoveNamespaceEndpoint(namespace.Id, endpoint.Id) + if err != nil { + t.Fatal(err) + } + + err = namespace.Delete() + if err != nil { + t.Fatal(err) + } + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +// Sync Tests + +func TestSyncNamespaceHostDefault(t *testing.T) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeHostDefault, + NamespaceId: 5, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + hnsNamespace, err := namespace.Create() + if err != nil { + t.Fatal(err) + } + + // Host namespace types should be no-op success + err = hnsNamespace.Sync() + if err != nil { + t.Fatal(err) + } + + err = hnsNamespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestSyncNamespaceHost(t *testing.T) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeHost, + NamespaceId: 5, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + hnsNamespace, err := namespace.Create() + if err != nil { + t.Fatal(err) + } + + // Host namespace types should be no-op success + err = hnsNamespace.Sync() + if err != nil { + t.Fatal(err) + } + + err = hnsNamespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestSyncNamespaceGuestNoReg(t *testing.T) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeGuest, + NamespaceId: 5, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + hnsNamespace, err := namespace.Create() + if err != nil { + t.Fatal(err) + } + + // Guest namespace type with 
out reg state should be no-op success + err = hnsNamespace.Sync() + if err != nil { + t.Fatal(err) + } + + err = hnsNamespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestSyncNamespaceGuestDefaultNoReg(t *testing.T) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeGuestDefault, + NamespaceId: 5, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + hnsNamespace, err := namespace.Create() + if err != nil { + t.Fatal(err) + } + + // Guest namespace type with out reg state should be no-op success + err = hnsNamespace.Sync() + if err != nil { + t.Fatal(err) + } + + err = hnsNamespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestSyncNamespaceGuest(t *testing.T) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeGuest, + NamespaceId: 5, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + hnsNamespace, err := namespace.Create() + + if err != nil { + t.Fatal(err) + } + + // Create registry state + pnc := cni.NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New()) + err = pnc.Store() + if err != nil { + pnc.Remove() + t.Fatal(err) + } + + // Guest namespace type with reg state but not Vm shim should pass... + // after trying to connect to VM shim that it doesn't find and remove the Key so it doesn't look again. + err = hnsNamespace.Sync() + if err != nil { + t.Fatal(err) + } + + err = pnc.Remove() + if err != nil { + t.Fatal(err) + } + err = hnsNamespace.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestSyncNamespaceGuestDefault(t *testing.T) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeGuestDefault, + NamespaceId: 5, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + hnsNamespace, err := namespace.Create() + if err != nil { + t.Fatal(err) + } + + // Create registry state + pnc := cni.NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New()) + err = pnc.Store() + if err != nil { + pnc.Remove() + t.Fatal(err) + } + + // Guest namespace type with reg state but not Vm shim should pass... + // after trying to connect to VM shim that it doesn't find and remove the Key so it doesn't look again. + err = hnsNamespace.Sync() + if err != nil { + t.Fatal(err) + } + + err = pnc.Remove() + if err != nil { + t.Fatal(err) + } + err = hnsNamespace.Delete() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go new file mode 100644 index 00000000..b5f1db8b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go @@ -0,0 +1,418 @@ +package hcn + +import ( + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// Route is assoicated with a subnet. +type Route struct { + NextHop string `json:",omitempty"` + DestinationPrefix string `json:",omitempty"` + Metric uint16 `json:",omitempty"` +} + +// Subnet is assoicated with a Ipam. +type Subnet struct { + IpAddressPrefix string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + Routes []Route `json:",omitempty"` +} + +// Ipam (Internet Protocol Addres Management) is assoicated with a network +// and represents the address space(s) of a network. +type Ipam struct { + Type string `json:",omitempty"` // Ex: Static, DHCP + Subnets []Subnet `json:",omitempty"` +} + +// MacRange is associated with MacPool and respresents the start and end addresses. 
+type MacRange struct { + StartMacAddress string `json:",omitempty"` + EndMacAddress string `json:",omitempty"` +} + +// MacPool is assoicated with a network and represents pool of MacRanges. +type MacPool struct { + Ranges []MacRange `json:",omitempty"` +} + +// Dns (Domain Name System is associated with a network. +type Dns struct { + Domain string `json:",omitempty"` + Search []string `json:",omitempty"` + ServerList []string `json:",omitempty"` + Options []string `json:",omitempty"` +} + +// NetworkType are various networks. +type NetworkType string + +// NetworkType const +const ( + NAT NetworkType = "NAT" + Transparent NetworkType = "Transparent" + L2Bridge NetworkType = "L2Bridge" + L2Tunnel NetworkType = "L2Tunnel" + ICS NetworkType = "ICS" + Private NetworkType = "Private" + Overlay NetworkType = "Overlay" +) + +// NetworkFlags are various network flags. +type NetworkFlags uint32 + +// NetworkFlags const +const ( + None NetworkFlags = 0 + EnableNonPersistent NetworkFlags = 8 +) + +// HostComputeNetwork represents a network +type HostComputeNetwork struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + Type NetworkType `json:",omitempty"` + Policies []NetworkPolicy `json:",omitempty"` + MacPool MacPool `json:",omitempty"` + Dns Dns `json:",omitempty"` + Ipams []Ipam `json:",omitempty"` + Flags NetworkFlags `json:",omitempty"` // 0: None + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// NetworkResourceType are the 3 different Network settings resources. +type NetworkResourceType string + +var ( + // NetworkResourceTypePolicy is for Network's policies. Ex: RemoteSubnet + NetworkResourceTypePolicy NetworkResourceType = "Policy" + // NetworkResourceTypeDNS is for Network's DNS settings. + NetworkResourceTypeDNS NetworkResourceType = "DNS" + // NetworkResourceTypeExtension is for Network's extension settings. + NetworkResourceTypeExtension NetworkResourceType = "Extension" +) + +// ModifyNetworkSettingRequest is the structure used to send request to modify an network. +// Used to update DNS/extension/policy on an network. +type ModifyNetworkSettingRequest struct { + ResourceType NetworkResourceType `json:",omitempty"` // Policy, DNS, Extension + RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh + Settings json.RawMessage `json:",omitempty"` +} + +type PolicyNetworkRequest struct { + Policies []NetworkPolicy `json:",omitempty"` +} + +func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) { + // Open network. + var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. + hr = hcnQueryNetworkProperties(networkHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. 
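+ // As with namespaces, the properties JSON has already been copied out of
+ // the COM buffer, so the network handle can be closed before decoding.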
+ hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func enumerateNetworks(query string) ([]HostComputeNetwork, error) { + // Enumerate all Network Guids + var ( + resultBuffer *uint16 + networkBuffer *uint16 + ) + hr := hcnEnumerateNetworks(query, &networkBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateNetworks", hr, resultBuffer); err != nil { + return nil, err + } + + networks := interop.ConvertAndFreeCoTaskMemString(networkBuffer) + var networkIds []guid.GUID + if err := json.Unmarshal([]byte(networks), &networkIds); err != nil { + return nil, err + } + + var outputNetworks []HostComputeNetwork + for _, networkGuid := range networkIds { + network, err := getNetwork(networkGuid, query) + if err != nil { + return nil, err + } + outputNetworks = append(outputNetworks, *network) + } + return outputNetworks, nil +} + +func createNetwork(settings string) (*HostComputeNetwork, error) { + // Create new network. + var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + networkGuid := guid.GUID{} + hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. + hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { + networkGuid := guid.FromString(networkId) + // Open Network + var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Modify Network + hr = hcnModifyNetwork(networkHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. 
+ hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func deleteNetwork(networkId string) error { + networkGuid := guid.FromString(networkId) + var resultBuffer *uint16 + hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListNetworks makes a call to list all available networks. +func ListNetworks() ([]HostComputeNetwork, error) { + hcnQuery := defaultQuery() + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + return networks, nil +} + +// ListNetworksQuery makes a call to query the list of available networks. +func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + networks, err := enumerateNetworks(string(queryJson)) + if err != nil { + return nil, err + } + return networks, nil +} + +// GetNetworkByID returns the network specified by Id. +func GetNetworkByID(networkID string) (*HostComputeNetwork, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": networkID} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(networks) == 0 { + return nil, NetworkNotFoundError{NetworkID: networkID} + } + return &networks[0], err +} + +// GetNetworkByName returns the network specified by Name. +func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"Name": networkName} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(networks) == 0 { + return nil, NetworkNotFoundError{NetworkName: networkName} + } + return &networks[0], err +} + +// Create Network. +func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) { + logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id) + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString) + network, hcnErr := createNetwork(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return network, nil +} + +// Delete Network. +func (network *HostComputeNetwork) Delete() error { + logrus.Debugf("hcn::HostComputeNetwork::Delete id=%s", network.Id) + + if err := deleteNetwork(network.Id); err != nil { + return err + } + return nil +} + +// ModifyNetworkSettings updates the Policy for a network. +func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkSettingRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::ModifyNetworkSettings id=%s", network.Id) + + networkSettingsRequest, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyNetwork(network.Id, string(networkSettingsRequest)) + if err != nil { + return err + } + return nil +} + +// AddPolicy applies a Policy (ex: RemoteSubnet) on the Network. 
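+// The policies are marshalled into a ModifyNetworkSettingRequest with
+// RequestTypeAdd and applied through ModifyNetworkSettings. A minimal usage
+// sketch, assuming the policy settings have already been marshalled into
+// settingsJSON:
+//
+//	policy := NetworkPolicy{Type: RemoteSubnetRoute, Settings: settingsJSON}
+//	err := network.AddPolicy(PolicyNetworkRequest{Policies: []NetworkPolicy{policy}})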
+func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id) + + settingsJson, err := json.Marshal(networkPolicy) + if err != nil { + return err + } + requestMessage := &ModifyNetworkSettingRequest{ + ResourceType: NetworkResourceTypePolicy, + RequestType: RequestTypeAdd, + Settings: settingsJson, + } + + return network.ModifyNetworkSettings(requestMessage) +} + +// RemovePolicy removes a Policy (ex: RemoteSubnet) from the Network. +func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id) + + settingsJson, err := json.Marshal(networkPolicy) + if err != nil { + return err + } + requestMessage := &ModifyNetworkSettingRequest{ + ResourceType: NetworkResourceTypePolicy, + RequestType: RequestTypeRemove, + Settings: settingsJson, + } + + return network.ModifyNetworkSettings(requestMessage) +} + +// CreateEndpoint creates an endpoint on the Network. +func (network *HostComputeNetwork) CreateEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { + isRemote := endpoint.Flags&EndpointFlagsRemoteEndpoint != 0 + logrus.Debugf("hcn::HostComputeNetwork::CreatEndpoint, networkId=%s remote=%t", network.Id, isRemote) + + endpoint.HostComputeNetwork = network.Id + endpointSettings, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + newEndpoint, err := createEndpoint(network.Id, string(endpointSettings)) + if err != nil { + return nil, err + } + return newEndpoint, nil +} + +// CreateRemoteEndpoint creates a remote endpoint on the Network. +func (network *HostComputeNetwork) CreateRemoteEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { + endpoint.Flags = EndpointFlagsRemoteEndpoint | endpoint.Flags + return network.CreateEndpoint(endpoint) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork_test.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork_test.go new file mode 100644 index 00000000..eaac2555 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork_test.go @@ -0,0 +1,165 @@ +// +build integration + +package hcn + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestCreateDeleteNetwork(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + jsonString, err := json.Marshal(network) + if err != nil { + t.Fatal(err) + } + fmt.Printf("Network JSON:\n%s \n", jsonString) + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestGetNetworkByName(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + network, err = GetNetworkByName(network.Name) + if err != nil { + t.Fatal(err) + } + if network == nil { + t.Fatal("No Network found") + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestGetNetworkById(t *testing.T) { + network, err := HcnCreateTestNATNetwork() + if err != nil { + t.Fatal(err) + } + network, err = GetNetworkByID(network.Id) + if err != nil { + t.Fatal(err) + } + if network == nil { + t.Fatal("No Network found") + } + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestListNetwork(t *testing.T) { + _, err := ListNetworks() + if err != nil { + t.Fatal(err) + } +} + +func testNetworkPolicy(t *testing.T, policiesToTest *PolicyNetworkRequest) { + network, err := CreateTestOverlayNetwork() + if err != nil { + t.Fatal(err) + } + + 
network.AddPolicy(*policiesToTest) + + //Reload the network object from HNS. + network, err = GetNetworkByID(network.Id) + if err != nil { + t.Fatal(err) + } + + for _, policyToTest := range policiesToTest.Policies { + foundPolicy := false + for _, policy := range network.Policies { + if policy.Type == policyToTest.Type { + foundPolicy = true + break + } + } + if !foundPolicy { + t.Fatalf("Could not find %s policy on network.", policyToTest.Type) + } + } + + network.RemovePolicy(*policiesToTest) + + //Reload the network object from HNS. + network, err = GetNetworkByID(network.Id) + if err != nil { + t.Fatal(err) + } + + for _, policyToTest := range policiesToTest.Policies { + foundPolicy := false + for _, policy := range network.Policies { + if policy.Type == policyToTest.Type { + foundPolicy = true + break + } + } + if foundPolicy { + t.Fatalf("Found %s policy on network when it should have been deleted.", policyToTest.Type) + } + } + + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestAddRemoveRemoteSubnetRoutePolicy(t *testing.T) { + + remoteSubnetRoutePolicy, err := HcnCreateTestRemoteSubnetRoute() + if err != nil { + t.Fatal(err) + } + + testNetworkPolicy(t, remoteSubnetRoutePolicy) +} + +func TestAddRemoveHostRoutePolicy(t *testing.T) { + + hostRoutePolicy, err := HcnCreateTestHostRoute() + if err != nil { + t.Fatal(err) + } + + testNetworkPolicy(t, hostRoutePolicy) +} + +func TestNetworkFlags(t *testing.T) { + + network, err := CreateTestOverlayNetwork() + if err != nil { + t.Fatal(err) + } + + //Reload the network object from HNS. + network, err = GetNetworkByID(network.Id) + if err != nil { + t.Fatal(err) + } + + if network.Flags != EnableNonPersistent { + t.Errorf("EnableNonPersistent flag (%d) is not set on network", EnableNonPersistent) + } + + err = network.Delete() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go new file mode 100644 index 00000000..6b12d73c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go @@ -0,0 +1,217 @@ +package hcn + +import "encoding/json" + +// EndpointPolicyType are the potential Policies that apply to Endpoints. +type EndpointPolicyType string + +// EndpointPolicyType const +const ( + PortMapping EndpointPolicyType = "PortMapping" + ACL EndpointPolicyType = "ACL" + QOS EndpointPolicyType = "QOS" + L2Driver EndpointPolicyType = "L2Driver" + OutBoundNAT EndpointPolicyType = "OutBoundNAT" + SDNRoute EndpointPolicyType = "SDNRoute" + L4Proxy EndpointPolicyType = "L4Proxy" + PortName EndpointPolicyType = "PortName" + EncapOverhead EndpointPolicyType = "EncapOverhead" + // Endpoint and Network have InterfaceConstraint and ProviderAddress + NetworkProviderAddress EndpointPolicyType = "ProviderAddress" + NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint" +) + +// EndpointPolicy is a collection of Policy settings for an Endpoint. +type EndpointPolicy struct { + Type EndpointPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// NetworkPolicyType are the potential Policies that apply to Networks. 
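+// Each value indicates how the Settings payload of a NetworkPolicy is
+// interpreted.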
+type NetworkPolicyType string + +// NetworkPolicyType const +const ( + SourceMacAddress NetworkPolicyType = "SourceMacAddress" + NetAdapterName NetworkPolicyType = "NetAdapterName" + VSwitchExtension NetworkPolicyType = "VSwitchExtension" + DrMacAddress NetworkPolicyType = "DrMacAddress" + AutomaticDNS NetworkPolicyType = "AutomaticDNS" + InterfaceConstraint NetworkPolicyType = "InterfaceConstraint" + ProviderAddress NetworkPolicyType = "ProviderAddress" + RemoteSubnetRoute NetworkPolicyType = "RemoteSubnetRoute" + HostRoute NetworkPolicyType = "HostRoute" +) + +// NetworkPolicy is a collection of Policy settings for a Network. +type NetworkPolicy struct { + Type NetworkPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// SubnetPolicyType are the potential Policies that apply to Subnets. +type SubnetPolicyType string + +// SubnetPolicyType const +const ( + VLAN SubnetPolicyType = "VLAN" + VSID SubnetPolicyType = "VSID" +) + +// SubnetPolicy is a collection of Policy settings for a Subnet. +type SubnetPolicy struct { + Type SubnetPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +/// Endpoint Policy objects + +// PortMappingPolicySetting defines Port Mapping (NAT) +type PortMappingPolicySetting struct { + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + VIP string `json:",omitempty"` +} + +// ActionType associated with ACLs. Value is either Allow or Block. +type ActionType string + +// DirectionType associated with ACLs. Value is either In or Out. +type DirectionType string + +// RuleType associated with ACLs. Value is either Host (WFP) or Switch (VFP). +type RuleType string + +const ( + // Allow traffic + ActionTypeAllow ActionType = "Allow" + // Block traffic + ActionTypeBlock ActionType = "Block" + + // In is traffic coming to the Endpoint + DirectionTypeIn DirectionType = "In" + // Out is traffic leaving the Endpoint + DirectionTypeOut DirectionType = "Out" + + // Host creates WFP (Windows Firewall) rules + RuleTypeHost RuleType = "Host" + // Switch creates VFP (Virtual Filter Platform) rules + RuleTypeSwitch RuleType = "Switch" +) + +// AclPolicySetting creates firewall rules on an endpoint +type AclPolicySetting struct { + Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) + Action ActionType `json:","` + Direction DirectionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + RuleType RuleType `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. +type QosPolicySetting struct { + MaximumOutgoingBandwidthInBytes uint64 +} + +// OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint. +type OutboundNatPolicySetting struct { + VirtualIP string `json:",omitempty"` + Exceptions []string `json:",omitempty"` +} + +// SDNRoutePolicySetting sets SDN Route on an Endpoint. +type SDNRoutePolicySetting struct { + DestinationPrefix string `json:",omitempty"` + NextHop string `json:",omitempty"` + NeedEncap bool `json:",omitempty"` +} + +// L4ProxyPolicySetting sets Layer-4 Proxy on an endpoint. 
+type L4ProxyPolicySetting struct { + IP string `json:",omitempty"` + Port string `json:",omitempty"` + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + ExceptionList []string `json:",omitempty"` + Destination string `json:","` + OutboundNat bool `json:",omitempty"` +} + +// PortnameEndpointPolicySetting sets the port name for an endpoint. +type PortnameEndpointPolicySetting struct { + Name string `json:",omitempty"` +} + +// EncapOverheadEndpointPolicySetting sets the encap overhead for an endpoint. +type EncapOverheadEndpointPolicySetting struct { + Overhead uint16 `json:",omitempty"` +} + +/// Endpoint and Network Policy objects + +// ProviderAddressEndpointPolicySetting sets the PA for an endpoint. +type ProviderAddressEndpointPolicySetting struct { + ProviderAddress string `json:",omitempty"` +} + +// InterfaceConstraintPolicySetting limits an Endpoint or Network to a specific Nic. +type InterfaceConstraintPolicySetting struct { + InterfaceGuid string `json:",omitempty"` + InterfaceLuid uint64 `json:",omitempty"` + InterfaceIndex uint32 `json:",omitempty"` + InterfaceMediaType uint32 `json:",omitempty"` + InterfaceAlias string `json:",omitempty"` + InterfaceDescription string `json:",omitempty"` +} + +/// Network Policy objects + +// SourceMacAddressNetworkPolicySetting sets source MAC for a network. +type SourceMacAddressNetworkPolicySetting struct { + SourceMacAddress string `json:",omitempty"` +} + +// NetAdapterNameNetworkPolicySetting sets network adapter of a network. +type NetAdapterNameNetworkPolicySetting struct { + NetworkAdapterName string `json:",omitempty"` +} + +// VSwitchExtensionNetworkPolicySetting enables/disabled VSwitch extensions for a network. +type VSwitchExtensionNetworkPolicySetting struct { + ExtensionID string `json:",omitempty"` + Enable bool `json:",omitempty"` +} + +// DrMacAddressNetworkPolicySetting sets the DR MAC for a network. +type DrMacAddressNetworkPolicySetting struct { + Address string `json:",omitempty"` +} + +// AutomaticDNSNetworkPolicySetting enables/disables automatic DNS on a network. +type AutomaticDNSNetworkPolicySetting struct { + Enable bool `json:",omitempty"` +} + +/// Subnet Policy objects + +// VlanPolicySetting isolates a subnet with VLAN tagging. +type VlanPolicySetting struct { + IsolationId uint32 `json:","` +} + +// VsidPolicySetting isolates a subnet with VSID tagging. +type VsidPolicySetting struct { + IsolationId uint32 `json:","` +} + +// RemoteSubnetRoutePolicySetting creates remote subnet route rules on a network +type RemoteSubnetRoutePolicySetting struct { + DestinationPrefix string + IsolationId uint16 + ProviderAddress string + DistributedRouterMacAddress string +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go new file mode 100644 index 00000000..9b5df203 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go @@ -0,0 +1,71 @@ +package hcn + +import ( + "github.com/sirupsen/logrus" +) + +// SupportedFeatures are the features provided by the Service. +type SupportedFeatures struct { + Acl AclFeatures `json:"ACL"` + Api ApiSupport `json:"API"` + RemoteSubnet bool `json:"RemoteSubnet"` + HostRoute bool `json:"HostRoute"` + DSR bool `json:"DSR"` +} + +// AclFeatures are the supported ACL possibilities. 
+type AclFeatures struct { + AclAddressLists bool `json:"AclAddressLists"` + AclNoHostRulePriority bool `json:"AclHostRulePriority"` + AclPortRanges bool `json:"AclPortRanges"` + AclRuleId bool `json:"AclRuleId"` +} + +// ApiSupport lists the supported API versions. +type ApiSupport struct { + V1 bool `json:"V1"` + V2 bool `json:"V2"` +} + +// GetSupportedFeatures returns the features supported by the Service. +func GetSupportedFeatures() SupportedFeatures { + var features SupportedFeatures + + globals, err := GetGlobals() + if err != nil { + // Expected on pre-1803 builds, all features will be false/unsupported + logrus.Debugf("Unable to obtain globals: %s", err) + return features + } + + features.Acl = AclFeatures{ + AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), + AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), + AclPortRanges: isFeatureSupported(globals.Version, HNSVersion1803), + AclRuleId: isFeatureSupported(globals.Version, HNSVersion1803), + } + + features.Api = ApiSupport{ + V2: isFeatureSupported(globals.Version, V2ApiSupport), + V1: true, // HNSCall is still available. + } + + features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion) + features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion) + features.DSR = isFeatureSupported(globals.Version, DSRVersion) + + return features +} + +func isFeatureSupported(currentVersion Version, minVersionSupported Version) bool { + if currentVersion.Major < minVersionSupported.Major { + return false + } + if currentVersion.Major > minVersionSupported.Major { + return true + } + if currentVersion.Minor < minVersionSupported.Minor { + return false + } + return true +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport_test.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport_test.go new file mode 100644 index 00000000..b4a02115 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport_test.go @@ -0,0 +1,62 @@ +// +build integration + +package hcn + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestSupportedFeatures(t *testing.T) { + supportedFeatures := GetSupportedFeatures() + jsonString, err := json.Marshal(supportedFeatures) + if err != nil { + t.Fatal(err) + } + fmt.Printf("Supported Features:\n%s \n", jsonString) +} + +func TestV2ApiSupport(t *testing.T) { + supportedFeatures := GetSupportedFeatures() + err := V2ApiSupported() + if supportedFeatures.Api.V2 && err != nil { + t.Fatal(err) + } + if !supportedFeatures.Api.V2 && err == nil { + t.Fatal(err) + } +} + +func TestRemoteSubnetSupport(t *testing.T) { + supportedFeatures := GetSupportedFeatures() + err := RemoteSubnetSupported() + if supportedFeatures.RemoteSubnet && err != nil { + t.Fatal(err) + } + if !supportedFeatures.RemoteSubnet && err == nil { + t.Fatal(err) + } +} + +func TestHostRouteSupport(t *testing.T) { + supportedFeatures := GetSupportedFeatures() + err := HostRouteSupported() + if supportedFeatures.HostRoute && err != nil { + t.Fatal(err) + } + if !supportedFeatures.HostRoute && err == nil { + t.Fatal(err) + } +} + +func TestDSRSupport(t *testing.T) { + supportedFeatures := GetSupportedFeatures() + err := DSRSupported() + if supportedFeatures.DSR && err != nil { + t.Fatal(err) + } + if !supportedFeatures.DSR && err == nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnutils_test.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnutils_test.go new file mode 100644 index 00000000..afc40cf4 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/hcn/hcnutils_test.go @@ -0,0 +1,267 @@ +// +build integration + +package hcn + +import ( + "encoding/json" +) + +func cleanup(networkName string) { + // Delete test network (if exists) + testNetwork, err := GetNetworkByName(networkName) + if err != nil { + return + } + if testNetwork != nil { + err := testNetwork.Delete() + if err != nil { + return + } + } +} + +func HcnCreateTestNATNetwork() (*HostComputeNetwork, error) { + cleanup(NatTestNetworkName) + network := &HostComputeNetwork{ + Type: "NAT", + Name: NatTestNetworkName, + MacPool: MacPool{ + Ranges: []MacRange{ + { + StartMacAddress: "00-15-5D-52-C0-00", + EndMacAddress: "00-15-5D-52-CF-FF", + }, + }, + }, + Ipams: []Ipam{ + { + Type: "Static", + Subnets: []Subnet{ + { + IpAddressPrefix: "192.168.100.0/24", + Routes: []Route{ + { + NextHop: "192.168.100.1", + DestinationPrefix: "0.0.0.0", + }, + }, + }, + }, + }, + }, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + return network.Create() +} + +func CreateTestOverlayNetwork() (*HostComputeNetwork, error) { + cleanup(OverlayTestNetworkName) + network := &HostComputeNetwork{ + Type: "Overlay", + Name: OverlayTestNetworkName, + MacPool: MacPool{ + Ranges: []MacRange{ + { + StartMacAddress: "00-15-5D-52-C0-00", + EndMacAddress: "00-15-5D-52-CF-FF", + }, + }, + }, + Ipams: []Ipam{ + { + Type: "Static", + Subnets: []Subnet{ + { + IpAddressPrefix: "192.168.100.0/24", + Routes: []Route{ + { + NextHop: "192.168.100.1", + DestinationPrefix: "0.0.0.0/0", + }, + }, + }, + }, + }, + }, + Flags: EnableNonPersistent, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + vsid := &VsidPolicySetting{ + IsolationId: 5000, + } + vsidJson, err := json.Marshal(vsid) + if err != nil { + return nil, err + } + + sp := &SubnetPolicy{ + Type: VSID, + } + sp.Settings = vsidJson + + spJson, err := json.Marshal(sp) + if err != nil { + return nil, err + } + + network.Ipams[0].Subnets[0].Policies = append(network.Ipams[0].Subnets[0].Policies, spJson) + + return network.Create() +} + +func HcnCreateTestEndpoint(network *HostComputeNetwork) (*HostComputeEndpoint, error) { + if network == nil { + + } + Endpoint := &HostComputeEndpoint{ + Name: NatTestEndpointName, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + return network.CreateEndpoint(Endpoint) +} + +func HcnCreateTestEndpointWithNamespace(network *HostComputeNetwork, namespace *HostComputeNamespace) (*HostComputeEndpoint, error) { + Endpoint := &HostComputeEndpoint{ + Name: NatTestEndpointName, + HostComputeNamespace: namespace.Id, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + return network.CreateEndpoint(Endpoint) +} + +func HcnCreateTestNamespace() (*HostComputeNamespace, error) { + namespace := &HostComputeNamespace{ + Type: NamespaceTypeHostDefault, + NamespaceId: 5, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + return namespace.Create() +} + +func HcnCreateAcls() (*PolicyEndpointRequest, error) { + in := AclPolicySetting{ + Protocols: "6", + Action: ActionTypeAllow, + Direction: DirectionTypeIn, + LocalAddresses: "192.168.100.0/24,10.0.0.21", + RemoteAddresses: "192.168.100.0/24,10.0.0.21", + LocalPorts: "80,8080", + RemotePorts: "80,8080", + RuleType: RuleTypeSwitch, + Priority: 200, + } + + rawJSON, err := json.Marshal(in) + if err != nil { + return nil, err + } + inPolicy := EndpointPolicy{ + Type: ACL, + Settings: rawJSON, + } + + out := AclPolicySetting{ + Protocols: "6", + Action: ActionTypeAllow, + 
Direction: DirectionTypeOut, + LocalAddresses: "192.168.100.0/24,10.0.0.21", + RemoteAddresses: "192.168.100.0/24,10.0.0.21", + LocalPorts: "80,8080", + RemotePorts: "80,8080", + RuleType: RuleTypeSwitch, + Priority: 200, + } + + rawJSON, err = json.Marshal(out) + if err != nil { + return nil, err + } + outPolicy := EndpointPolicy{ + Type: ACL, + Settings: rawJSON, + } + + endpointRequest := PolicyEndpointRequest{ + Policies: []EndpointPolicy{inPolicy, outPolicy}, + } + + return &endpointRequest, nil +} + +func HcnCreateTestLoadBalancer(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { + loadBalancer := &HostComputeLoadBalancer{ + HostComputeEndpoints: []string{endpoint.Id}, + SourceVIP: "10.0.0.1", + PortMappings: []LoadBalancerPortMapping{ + { + Protocol: 6, // TCP + InternalPort: 8080, + ExternalPort: 8090, + }, + }, + FrontendVIPs: []string{"1.1.1.2", "1.1.1.3"}, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + } + + return loadBalancer.Create() +} + +func HcnCreateTestRemoteSubnetRoute() (*PolicyNetworkRequest, error) { + rsr := RemoteSubnetRoutePolicySetting{ + DestinationPrefix: "192.168.2.0/24", + IsolationId: 5000, + ProviderAddress: "1.1.1.1", + DistributedRouterMacAddress: "00-12-34-56-78-9a", + } + + rawJSON, err := json.Marshal(rsr) + if err != nil { + return nil, err + } + rsrPolicy := NetworkPolicy{ + Type: RemoteSubnetRoute, + Settings: rawJSON, + } + + networkRequest := PolicyNetworkRequest{ + Policies: []NetworkPolicy{rsrPolicy}, + } + + return &networkRequest, nil +} + +func HcnCreateTestHostRoute() (*PolicyNetworkRequest, error) { + hostRoutePolicy := NetworkPolicy{ + Type: HostRoute, + Settings: []byte("{}"), + } + + networkRequest := PolicyNetworkRequest{ + Policies: []NetworkPolicy{hostRoutePolicy}, + } + + return &networkRequest, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnv1schema_test.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnv1schema_test.go new file mode 100644 index 00000000..fd300ed0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnv1schema_test.go @@ -0,0 +1,111 @@ +// +build integration + +package hcn + +import ( + "encoding/json" + "testing" + + "github.com/Microsoft/hcsshim" +) + +func TestV1Network(t *testing.T) { + cleanup(NatTestNetworkName) + + v1network := hcsshim.HNSNetwork{ + Type: "NAT", + Name: NatTestNetworkName, + MacPools: []hcsshim.MacPool{ + { + StartMacAddress: "00-15-5D-52-C0-00", + EndMacAddress: "00-15-5D-52-CF-FF", + }, + }, + Subnets: []hcsshim.Subnet{ + { + AddressPrefix: "192.168.100.0/24", + GatewayAddress: "192.168.100.1", + }, + }, + } + + jsonString, err := json.Marshal(v1network) + if err != nil { + t.Fatal(err) + t.Fail() + } + + network, err := createNetwork(string(jsonString)) + if err != nil { + t.Fatal(err) + t.Fail() + } + + err = network.Delete() + if err != nil { + t.Fatal(err) + t.Fail() + } +} + +func TestV1Endpoint(t *testing.T) { + cleanup(NatTestNetworkName) + + v1network := hcsshim.HNSNetwork{ + Type: "NAT", + Name: NatTestNetworkName, + MacPools: []hcsshim.MacPool{ + { + StartMacAddress: "00-15-5D-52-C0-00", + EndMacAddress: "00-15-5D-52-CF-FF", + }, + }, + Subnets: []hcsshim.Subnet{ + { + AddressPrefix: "192.168.100.0/24", + GatewayAddress: "192.168.100.1", + }, + }, + } + + jsonString, err := json.Marshal(v1network) + if err != nil { + t.Fatal(err) + t.Fail() + } + + network, err := createNetwork(string(jsonString)) + if err != nil { + t.Fatal(err) + t.Fail() + } + + v1endpoint := hcsshim.HNSEndpoint{ + Name: NatTestEndpointName, + 
VirtualNetwork: network.Id, + } + + jsonString, err = json.Marshal(v1endpoint) + if err != nil { + t.Fatal(err) + t.Fail() + } + + endpoint, err := createEndpoint(network.Id, string(jsonString)) + if err != nil { + t.Fatal(err) + t.Fail() + } + + err = endpoint.Delete() + if err != nil { + t.Fatal(err) + t.Fail() + } + + err = network.Delete() + if err != nil { + t.Fatal(err) + t.Fail() + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hnsv1_test.go b/vendor/github.com/Microsoft/hcsshim/hcn/hnsv1_test.go new file mode 100644 index 00000000..e5a86747 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hnsv1_test.go @@ -0,0 +1,97 @@ +// +build integration + +package hcn + +import ( + "os" + "testing" + + "github.com/Microsoft/hcsshim" +) + +const ( + NatTestNetworkName string = "GoTestNat" + NatTestEndpointName string = "GoTestNatEndpoint" + OverlayTestNetworkName string = "GoTestOverlay" +) + +func TestMain(m *testing.M) { + os.Exit(m.Run()) +} + +func CreateTestNetwork() (*hcsshim.HNSNetwork, error) { + network := &hcsshim.HNSNetwork{ + Type: "NAT", + Name: NatTestNetworkName, + Subnets: []hcsshim.Subnet{ + { + AddressPrefix: "192.168.100.0/24", + GatewayAddress: "192.168.100.1", + }, + }, + } + + return network.Create() +} + +func TestEndpoint(t *testing.T) { + + network, err := CreateTestNetwork() + if err != nil { + t.Fatal(err) + } + + Endpoint := &hcsshim.HNSEndpoint{ + Name: NatTestEndpointName, + } + + Endpoint, err = network.CreateEndpoint(Endpoint) + if err != nil { + t.Fatal(err) + } + + err = Endpoint.HostAttach(1) + if err != nil { + t.Fatal(err) + } + + err = Endpoint.HostDetach() + if err != nil { + t.Fatal(err) + } + + _, err = Endpoint.Delete() + if err != nil { + t.Fatal(err) + } + + _, err = network.Delete() + if err != nil { + t.Fatal(err) + } +} + +func TestEndpointGetAll(t *testing.T) { + _, err := hcsshim.HNSListEndpointRequest() + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkGetAll(t *testing.T) { + _, err := hcsshim.HNSListNetworkRequest("GET", "", "") + if err != nil { + t.Fatal(err) + } +} + +func TestNetwork(t *testing.T) { + network, err := CreateTestNetwork() + if err != nil { + t.Fatal(err) + } + _, err = network.Delete() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go new file mode 100644 index 00000000..856b2c14 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go @@ -0,0 +1,714 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hcn + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll") + + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") + procHNSCall = modvmcompute.NewProc("HNSCall") + procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks") + procHcnCreateNetwork = modcomputenetwork.NewProc("HcnCreateNetwork") + procHcnOpenNetwork = modcomputenetwork.NewProc("HcnOpenNetwork") + procHcnModifyNetwork = modcomputenetwork.NewProc("HcnModifyNetwork") + procHcnQueryNetworkProperties = modcomputenetwork.NewProc("HcnQueryNetworkProperties") + procHcnDeleteNetwork = modcomputenetwork.NewProc("HcnDeleteNetwork") + procHcnCloseNetwork = modcomputenetwork.NewProc("HcnCloseNetwork") + procHcnEnumerateEndpoints = modcomputenetwork.NewProc("HcnEnumerateEndpoints") + procHcnCreateEndpoint = modcomputenetwork.NewProc("HcnCreateEndpoint") + procHcnOpenEndpoint = modcomputenetwork.NewProc("HcnOpenEndpoint") + procHcnModifyEndpoint = modcomputenetwork.NewProc("HcnModifyEndpoint") + procHcnQueryEndpointProperties = modcomputenetwork.NewProc("HcnQueryEndpointProperties") + procHcnDeleteEndpoint = modcomputenetwork.NewProc("HcnDeleteEndpoint") + procHcnCloseEndpoint = modcomputenetwork.NewProc("HcnCloseEndpoint") + procHcnEnumerateNamespaces = modcomputenetwork.NewProc("HcnEnumerateNamespaces") + procHcnCreateNamespace = modcomputenetwork.NewProc("HcnCreateNamespace") + procHcnOpenNamespace = modcomputenetwork.NewProc("HcnOpenNamespace") + procHcnModifyNamespace = modcomputenetwork.NewProc("HcnModifyNamespace") + procHcnQueryNamespaceProperties = modcomputenetwork.NewProc("HcnQueryNamespaceProperties") + procHcnDeleteNamespace = modcomputenetwork.NewProc("HcnDeleteNamespace") + procHcnCloseNamespace = modcomputenetwork.NewProc("HcnCloseNamespace") + procHcnEnumerateLoadBalancers = modcomputenetwork.NewProc("HcnEnumerateLoadBalancers") + procHcnCreateLoadBalancer = modcomputenetwork.NewProc("HcnCreateLoadBalancer") + procHcnOpenLoadBalancer = modcomputenetwork.NewProc("HcnOpenLoadBalancer") + procHcnModifyLoadBalancer = modcomputenetwork.NewProc("HcnModifyLoadBalancer") + procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") + procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") + procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") + procHcnOpenService = modcomputenetwork.NewProc("HcnOpenService") + procHcnRegisterServiceCallback = modcomputenetwork.NewProc("HcnRegisterServiceCallback") + procHcnUnregisterServiceCallback = modcomputenetwork.NewProc("HcnUnregisterServiceCallback") + procHcnCloseService = modcomputenetwork.NewProc("HcnCloseService") +) + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, 
_p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + if hr = procHNSCall.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateNetworks(_p0, networks, result) +} + +func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateNetworks.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateNetwork(id, _p0, network, result) +} + +func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) { + if hr = procHcnCreateNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) { + if hr = procHcnOpenNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyNetwork(network, _p0, result) +} + +func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryNetworkProperties(network, _p0, properties, result) +} + +func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryNetworkProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, 
uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseNetwork(network hcnNetwork) (hr error) { + if hr = procHcnCloseNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateEndpoints(_p0, endpoints, result) +} + +func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateEndpoints.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateEndpoint(network, id, _p0, endpoint, result) +} + +func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) { + if hr = procHcnCreateEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) { + if hr = procHcnOpenEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyEndpoint(endpoint, _p0, result) +} + +func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + 
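+// Every wrapper in this generated file repeats the same failure check: a
+// negative return value is treated as a failing HRESULT, and when its
+// facility bits are FACILITY_WIN32 (0x0007) the low 16 bits carry the
+// original Win32 error code, which is unpacked before being surfaced as a
+// syscall.Errno. A minimal sketch of that pattern as a standalone helper
+// (illustrative only, not part of the generated code; the name
+// hresultToErrno is assumed):
+//
+//	func hresultToErrno(r0 uintptr) error {
+//		if int32(r0) >= 0 {
+//			return nil // success HRESULT
+//		}
+//		if r0&0x1fff0000 == 0x00070000 { // FACILITY_WIN32
+//			r0 &= 0xffff // recover the Win32 error code
+//		}
+//		return syscall.Errno(r0)
+//	}
+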
+func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryEndpointProperties(endpoint, _p0, properties, result) +} + +func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryEndpointProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) { + if hr = procHcnCloseEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateNamespaces(_p0, namespaces, result) +} + +func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateNamespaces.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateNamespaces.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(namespaces)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateNamespace(id, _p0, namespace, result) +} + +func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) { + if hr = procHcnCreateNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) { + if hr = procHcnOpenNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = 
syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyNamespace(namespace, _p0, result) +} + +func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryNamespaceProperties(namespace, _p0, properties, result) +} + +func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryNamespaceProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseNamespace(namespace hcnNamespace) (hr error) { + if hr = procHcnCloseNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result) +} + +func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateLoadBalancers.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result) +} + +func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + if hr = procHcnCreateLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { 
+ r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + if hr = procHcnOpenLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyLoadBalancer(loadBalancer, _p0, result) +} + +func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result) +} + +func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryLoadBalancerProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { + if hr = procHcnCloseLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenService(service *hcnService, result **uint16) (hr error) { + if hr = procHcnOpenService.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenService.Addr(), 2, uintptr(unsafe.Pointer(service)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnRegisterServiceCallback(service hcnService, callback int32, context int32, callbackHandle *hcnCallbackHandle) (hr error) { + if hr = procHcnRegisterServiceCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnRegisterServiceCallback.Addr(), 4, uintptr(service), uintptr(callback), uintptr(context), 
uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnUnregisterServiceCallback(callbackHandle hcnCallbackHandle) (hr error) { + if hr = procHcnUnregisterServiceCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnUnregisterServiceCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseService(service hcnService) (hr error) { + if hr = procHcnCloseService.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseService.Addr(), 1, uintptr(service), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go new file mode 100644 index 00000000..ceb3ac85 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go @@ -0,0 +1,28 @@ +// Shim for the Host Compute Service (HCS) to manage Windows Server +// containers and Hyper-V containers. + +package hcsshim + +import ( + "syscall" + + "github.com/Microsoft/hcsshim/internal/hcserror" +) + +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go + +//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId + +const ( + // Specific user-visible exit codes + WaitErrExecFailed = 32767 + + ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE + ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) + WSAEINVAL = syscall.Errno(10022) + + // Timeout on wait calls + TimeoutInfinite = 0xFFFFFFFF +) + +type HcsError = hcserror.HcsError diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go new file mode 100644 index 00000000..eb013d2c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -0,0 +1,94 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +// HNSEndpoint represents a network endpoint in HNS +type HNSEndpoint = hns.HNSEndpoint + +// Namespace represents a Compartment. 
+type Namespace = hns.Namespace + +//SystemType represents the type of the system on which actions are done +type SystemType string + +// SystemType const +const ( + ContainerType SystemType = "Container" + VirtualMachineType SystemType = "VirtualMachine" + HostType SystemType = "Host" +) + +// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest + +// EndpointResquestResponse is object to get the endpoint request response +type EndpointResquestResponse = hns.EndpointResquestResponse + +// HNSEndpointRequest makes a HNS call to modify/query a network endpoint +func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { + return hns.HNSEndpointRequest(method, path, request) +} + +// HNSListEndpointRequest makes a HNS call to query the list of available endpoints +func HNSListEndpointRequest() ([]HNSEndpoint, error) { + return hns.HNSListEndpointRequest() +} + +// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container +func HotAttachEndpoint(containerID string, endpointID string) error { + return modifyNetworkEndpoint(containerID, endpointID, Add) +} + +// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container +func HotDetachEndpoint(containerID string, endpointID string) error { + return modifyNetworkEndpoint(containerID, endpointID, Remove) +} + +// ModifyContainer corresponding to the container id, by sending a request +func modifyContainer(id string, request *ResourceModificationRequestResponse) error { + container, err := OpenContainer(id) + if err != nil { + if IsNotExist(err) { + return ErrComputeSystemDoesNotExist + } + return getInnerError(err) + } + defer container.Close() + err = container.Modify(request) + if err != nil { + if IsNotSupported(err) { + return ErrPlatformNotSupported + } + return getInnerError(err) + } + + return nil +} + +func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error { + requestMessage := &ResourceModificationRequestResponse{ + Resource: Network, + Request: request, + Data: endpointID, + } + err := modifyContainer(containerID, requestMessage) + + if err != nil { + return err + } + + return nil +} + +// GetHNSEndpointByID get the Endpoint by ID +func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { + return hns.GetHNSEndpointByID(endpointID) +} + +// GetHNSEndpointByName gets the endpoint filtered by Name +func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { + return hns.GetHNSEndpointByName(endpointName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go new file mode 100644 index 00000000..2b538190 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go @@ -0,0 +1,16 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +type HNSGlobals = hns.HNSGlobals +type HNSVersion = hns.HNSVersion + +var ( + HNSVersion1803 = hns.HNSVersion1803 +) + +func GetHNSGlobals() (*HNSGlobals, error) { + return hns.GetHNSGlobals() +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go new file mode 100644 index 00000000..f775fa1d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go @@ -0,0 +1,36 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" 
+)
+
+// Subnet is associated with a network and represents a list
+// of subnets available to the network
+type Subnet = hns.Subnet
+
+// MacPool is associated with a network and represents a list
+// of MAC addresses available to the network
+type MacPool = hns.MacPool
+
+// HNSNetwork represents a network in HNS
+type HNSNetwork = hns.HNSNetwork
+
+// HNSNetworkRequest makes a call into HNS to update/query a single network
+func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
+	return hns.HNSNetworkRequest(method, path, request)
+}
+
+// HNSListNetworkRequest makes a HNS call to query the list of available networks
+func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
+	return hns.HNSListNetworkRequest(method, path, request)
+}
+
+// GetHNSNetworkByID gets the network by ID
+func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
+	return hns.GetHNSNetworkByID(networkID)
+}
+
+// GetHNSNetworkByName gets the network filtered by Name
+func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
+	return hns.GetHNSNetworkByName(networkName)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
new file mode 100644
index 00000000..a3e03ff8
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
@@ -0,0 +1,57 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// PolicyType is the type of Request Support in ModifySystem
+type PolicyType = hns.PolicyType
+
+// RequestType const
+const (
+	Nat = hns.Nat
+	ACL = hns.ACL
+	PA = hns.PA
+	VLAN = hns.VLAN
+	VSID = hns.VSID
+	VNet = hns.VNet
+	L2Driver = hns.L2Driver
+	Isolation = hns.Isolation
+	QOS = hns.QOS
+	OutboundNat = hns.OutboundNat
+	ExternalLoadBalancer = hns.ExternalLoadBalancer
+	Route = hns.Route
+)
+
+type NatPolicy = hns.NatPolicy
+
+type QosPolicy = hns.QosPolicy
+
+type IsolationPolicy = hns.IsolationPolicy
+
+type VlanPolicy = hns.VlanPolicy
+
+type VsidPolicy = hns.VsidPolicy
+
+type PaPolicy = hns.PaPolicy
+
+type OutboundNatPolicy = hns.OutboundNatPolicy
+
+type ActionType = hns.ActionType
+type DirectionType = hns.DirectionType
+type RuleType = hns.RuleType
+
+const (
+	Allow = hns.Allow
+	Block = hns.Block
+
+	In = hns.In
+	Out = hns.Out
+
+	Host = hns.Host
+	Switch = hns.Switch
+)
+
+type ACLPolicy = hns.ACLPolicy
+
+type Policy = hns.Policy
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
new file mode 100644
index 00000000..55aaa4a5
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
@@ -0,0 +1,47 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// RoutePolicy is a structure defining schema for Route based Policy
+type RoutePolicy = hns.RoutePolicy
+
+// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
+type ELBPolicy = hns.ELBPolicy
+
+// LBPolicy is a structure defining schema for LoadBalancing based Policy
+type LBPolicy = hns.LBPolicy
+
+// PolicyList is a structure defining schema for Policy list request
+type PolicyList = hns.PolicyList
+
+// HNSPolicyListRequest makes a call into HNS to update/query a single policy list
+func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
+	return hns.HNSPolicyListRequest(method, path, request)
+}
+
+// HNSListPolicyListRequest gets all the policy lists
+func HNSListPolicyListRequest() ([]PolicyList, error) {
+	return hns.HNSListPolicyListRequest()
+}
+
+// PolicyListRequest makes a HNS call to
modify/query a network policy list +func PolicyListRequest(method, path, request string) (*PolicyList, error) { + return hns.PolicyListRequest(method, path, request) +} + +// GetPolicyListByID get the policy list by ID +func GetPolicyListByID(policyListID string) (*PolicyList, error) { + return hns.GetPolicyListByID(policyListID) +} + +// AddLoadBalancer policy list for the specified endpoints +func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { + return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) +} + +// AddRoute adds route policy list for the specified endpoints +func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { + return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go new file mode 100644 index 00000000..69405244 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hnssupport.go @@ -0,0 +1,13 @@ +package hcsshim + +import ( + "github.com/Microsoft/hcsshim/internal/hns" +) + +type HNSSupportedFeatures = hns.HNSSupportedFeatures + +type HNSAclFeatures = hns.HNSAclFeatures + +func GetHNSSupportedFeatures() HNSSupportedFeatures { + return hns.GetHNSSupportedFeatures() +} diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go new file mode 100644 index 00000000..5b91e0cc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/interface.go @@ -0,0 +1,114 @@ +package hcsshim + +import ( + "io" + "time" + + "github.com/Microsoft/hcsshim/internal/schema1" +) + +// ProcessConfig is used as both the input of Container.CreateProcess +// and to convert the parameters to JSON for passing onto the HCS +type ProcessConfig = schema1.ProcessConfig + +type Layer = schema1.Layer +type MappedDir = schema1.MappedDir +type MappedPipe = schema1.MappedPipe +type HvRuntime = schema1.HvRuntime +type MappedVirtualDisk = schema1.MappedVirtualDisk + +// AssignedDevice represents a device that has been directly assigned to a container +// +// NOTE: Support added in RS5 +type AssignedDevice = schema1.AssignedDevice + +// ContainerConfig is used as both the input of CreateContainer +// and to convert the parameters to JSON for passing onto the HCS +type ContainerConfig = schema1.ContainerConfig + +type ComputeSystemQuery = schema1.ComputeSystemQuery + +// Container represents a created (but not necessarily running) container. +type Container interface { + // Start synchronously starts the container. + Start() error + + // Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. + Shutdown() error + + // Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. + Terminate() error + + // Waits synchronously waits for the container to shutdown or terminate. + Wait() error + + // WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It + // returns false if timeout occurs. + WaitTimeout(time.Duration) error + + // Pause pauses the execution of a container. + Pause() error + + // Resume resumes the execution of a container. + Resume() error + + // HasPendingUpdates returns true if the container has updates pending to install. 
+ HasPendingUpdates() (bool, error) + + // Statistics returns statistics for a container. + Statistics() (Statistics, error) + + // ProcessList returns details for the processes in a container. + ProcessList() ([]ProcessListItem, error) + + // MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller + MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) + + // CreateProcess launches a new process within the container. + CreateProcess(c *ProcessConfig) (Process, error) + + // OpenProcess gets an interface to an existing process within the container. + OpenProcess(pid int) (Process, error) + + // Close cleans up any state associated with the container but does not terminate or wait for it. + Close() error + + // Modify the System + Modify(config *ResourceModificationRequestResponse) error +} + +// Process represents a running or exited process. +type Process interface { + // Pid returns the process ID of the process within the container. + Pid() int + + // Kill signals the process to terminate but does not wait for it to finish terminating. + Kill() error + + // Wait waits for the process to exit. + Wait() error + + // WaitTimeout waits for the process to exit or the duration to elapse. It returns + // false if timeout occurs. + WaitTimeout(time.Duration) error + + // ExitCode returns the exit code of the process. The process must have + // already terminated. + ExitCode() (int, error) + + // ResizeConsole resizes the console of the process. + ResizeConsole(width, height uint16) error + + // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing + // these pipes does not close the underlying pipes; it should be possible to + // call this multiple times to get multiple interfaces. + Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) + + // CloseStdin closes the write side of the stdin pipe so that the process is + // notified on the read side that there is no more data in stdin. + CloseStdin() error + + // Close cleans up any state associated with the process but does not kill + // or wait on it. + Close() error +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/appargs/appargs.go b/vendor/github.com/Microsoft/hcsshim/internal/appargs/appargs.go new file mode 100644 index 00000000..f4993da0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/appargs/appargs.go @@ -0,0 +1,93 @@ +// Package appargs provides argument validation routines for use with +// github.com/urfave/cli. +package appargs + +import ( + "errors" + "strconv" + + "github.com/urfave/cli" +) + +// Validator is an argument validator function. It returns the number of +// arguments consumed or -1 on error. +type Validator = func([]string) int + +// String is a validator for strings. +func String(args []string) int { + if len(args) == 0 { + return -1 + } + return 1 +} + +// NonEmptyString is a validator for non-empty strings. +func NonEmptyString(args []string) int { + if len(args) == 0 || args[0] == "" { + return -1 + } + return 1 +} + +// Int returns a validator for integers. +func Int(base int, min int, max int) Validator { + return func(args []string) int { + if len(args) == 0 { + return -1 + } + i, err := strconv.ParseInt(args[0], base, 0) + if err != nil || int(i) < min || int(i) > max { + return -1 + } + return 1 + } +} + +// Optional returns a validator that treats an argument as optional. 
+func Optional(v Validator) Validator { + return func(args []string) int { + if len(args) == 0 { + return 0 + } + return v(args) + } +} + +// Rest returns a validator that validates each of the remaining arguments. +func Rest(v Validator) Validator { + return func(args []string) int { + count := len(args) + for len(args) != 0 { + n := v(args) + if n < 0 { + return n + } + args = args[n:] + } + return count + } +} + +// ErrInvalidUsage is returned when there is a validation error. +var ErrInvalidUsage = errors.New("invalid command usage") + +// Validate can be used as a command's Before function to validate the arguments +// to the command. +func Validate(vs ...Validator) cli.BeforeFunc { + return func(context *cli.Context) error { + remaining := context.Args() + for _, v := range vs { + consumed := v(remaining) + if consumed < 0 { + return ErrInvalidUsage + } + remaining = remaining[consumed:] + } + + if len(remaining) > 0 { + return ErrInvalidUsage + } + + return nil + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go new file mode 100644 index 00000000..842ee1f7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go @@ -0,0 +1,110 @@ +package cni + +import ( + "errors" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/regstate" +) + +const ( + cniRoot = "cni" + cniKey = "cfg" +) + +// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM +// map. +type PersistedNamespaceConfig struct { + namespaceID string + stored bool + + ContainerID string + HostUniqueID guid.GUID +} + +// NewPersistedNamespaceConfig creates an in-memory namespace config that can be +// persisted to the registry. +func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig { + return &PersistedNamespaceConfig{ + namespaceID: namespaceID, + ContainerID: containerID, + HostUniqueID: containerHostUniqueID, + } +} + +// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches +// `namespaceID`. If not found returns `regstate.NotFoundError` +func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) { + sk, err := regstate.Open(cniRoot, false) + if err != nil { + return nil, err + } + defer sk.Close() + + pnc := PersistedNamespaceConfig{ + namespaceID: namespaceID, + stored: true, + } + if err := sk.Get(namespaceID, cniKey, &pnc); err != nil { + return nil, err + } + return &pnc, nil +} + +// Store stores or updates the in-memory config to its registry state. If the +// store failes returns the store error. +func (pnc *PersistedNamespaceConfig) Store() error { + if pnc.namespaceID == "" { + return errors.New("invalid namespaceID ''") + } + if pnc.ContainerID == "" { + return errors.New("invalid containerID ''") + } + empty := guid.GUID{} + if pnc.HostUniqueID == empty { + return errors.New("invalid containerHostUniqueID 'empy'") + } + sk, err := regstate.Open(cniRoot, false) + if err != nil { + return err + } + defer sk.Close() + + if pnc.stored { + if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil { + return err + } + } else { + if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil { + return err + } + } + pnc.stored = true + return nil +} + +// Remove removes any persisted state associated with this config. If the config +// is not found in the registery `Remove` returns no error. 
+func (pnc *PersistedNamespaceConfig) Remove() error { + if pnc.stored { + sk, err := regstate.Open(cniRoot, false) + if err != nil { + if regstate.IsNotFoundError(err) { + pnc.stored = false + return nil + } + return err + } + defer sk.Close() + + if err := sk.Remove(pnc.namespaceID); err != nil { + if regstate.IsNotFoundError(err) { + pnc.stored = false + return nil + } + return err + } + } + pnc.stored = false + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry_test.go b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry_test.go new file mode 100644 index 00000000..782cd0cc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry_test.go @@ -0,0 +1,137 @@ +package cni + +import ( + "testing" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/regstate" +) + +func Test_LoadPersistedNamespaceConfig_NoConfig(t *testing.T) { + pnc, err := LoadPersistedNamespaceConfig(t.Name()) + if pnc != nil { + t.Fatal("config should be nil") + } + if err == nil { + t.Fatal("err should be set") + } else { + if !regstate.IsNotFoundError(err) { + t.Fatal("err should be NotFoundError") + } + } +} + +func Test_LoadPersistedNamespaceConfig_WithConfig(t *testing.T) { + pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New()) + err := pnc.Store() + if err != nil { + pnc.Remove() + t.Fatalf("store failed with: %v", err) + } + defer pnc.Remove() + + pnc2, err := LoadPersistedNamespaceConfig(t.Name()) + if err != nil { + t.Fatal("should have no error on stored config") + } + if pnc2 == nil { + t.Fatal("stored config should have been returned") + } else { + if pnc.namespaceID != pnc2.namespaceID { + t.Fatal("actual/stored namespaceID not equal") + } + if pnc.ContainerID != pnc2.ContainerID { + t.Fatal("actual/stored ContainerID not equal") + } + if pnc.HostUniqueID != pnc2.HostUniqueID { + t.Fatal("actual/stored HostUniqueID not equal") + } + if !pnc2.stored { + t.Fatal("stored should be true for registry load") + } + } +} + +func Test_PersistedNamespaceConfig_StoreNew(t *testing.T) { + pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New()) + err := pnc.Store() + if err != nil { + pnc.Remove() + t.Fatalf("store failed with: %v", err) + } + defer pnc.Remove() +} + +func Test_PersistedNamespaceConfig_StoreUpdate(t *testing.T) { + pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New()) + err := pnc.Store() + if err != nil { + pnc.Remove() + t.Fatalf("store failed with: %v", err) + } + defer pnc.Remove() + + pnc.ContainerID = "test-container2" + pnc.HostUniqueID = guid.New() + err = pnc.Store() + if err != nil { + pnc.Remove() + t.Fatalf("store update failed with: %v", err) + } + + // Verify the update + pnc2, err := LoadPersistedNamespaceConfig(t.Name()) + if err != nil { + t.Fatal("stored config should have been returned") + } + if pnc.ContainerID != pnc2.ContainerID { + t.Fatal("actual/stored ContainerID not equal") + } + if pnc.HostUniqueID != pnc2.HostUniqueID { + t.Fatal("actual/stored HostUniqueID not equal") + } +} + +func Test_PersistedNamespaceConfig_RemoveNotStored(t *testing.T) { + pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New()) + err := pnc.Remove() + if err != nil { + t.Fatalf("remove on not stored should not fail: %v", err) + } +} + +func Test_PersistedNamespaceConfig_RemoveStoredKey(t *testing.T) { + pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New()) + err := pnc.Store() + if err != nil { + 
t.Fatalf("store failed with: %v", err)
+	}
+	err = pnc.Remove()
+	if err != nil {
+		t.Fatalf("remove on stored key should not fail: %v", err)
+	}
+}
+
+func Test_PersistedNamespaceConfig_RemovedOtherKey(t *testing.T) {
+	pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
+	err := pnc.Store()
+	if err != nil {
+		t.Fatalf("store failed with: %v", err)
+	}
+
+	pnc2, err := LoadPersistedNamespaceConfig(t.Name())
+	if err != nil {
+		t.Fatal("should have found stored config")
+	}
+
+	err = pnc.Remove()
+	if err != nil {
+		t.Fatalf("remove on stored key should not fail: %v", err)
+	}
+
+	// Now remove the other key that has the invalid memory state
+	err = pnc2.Remove()
+	if err != nil {
+		t.Fatalf("remove on in-memory already removed should not fail: %v", err)
+	}
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go b/vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go
new file mode 100644
index 00000000..44998cfc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go
@@ -0,0 +1,40 @@
+package copyfile
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+	procCopyFileW = modkernel32.NewProc("CopyFileW")
+)
+
+// CopyFile is a utility for copying a file - used for the LCOW scratch cache.
+// Uses the CopyFileW Win32 API for performance.
+func CopyFile(srcFile, destFile string, overwrite bool) error {
+	var bFailIfExists uint32 = 1
+	if overwrite {
+		bFailIfExists = 0
+	}
+
+	lpExistingFileName, err := syscall.UTF16PtrFromString(srcFile)
+	if err != nil {
+		return err
+	}
+	lpNewFileName, err := syscall.UTF16PtrFromString(destFile)
+	if err != nil {
+		return err
+	}
+	r1, _, err := syscall.Syscall(
+		procCopyFileW.Addr(),
+		3,
+		uintptr(unsafe.Pointer(lpExistingFileName)),
+		uintptr(unsafe.Pointer(lpNewFileName)),
+		uintptr(bFailIfExists))
+	if r1 == 0 {
+		return fmt.Errorf("failed CopyFileW Win32 call from '%s' to '%s': %s", srcFile, destFile, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/copywithtimeout/copywithtimeout.go b/vendor/github.com/Microsoft/hcsshim/internal/copywithtimeout/copywithtimeout.go
new file mode 100644
index 00000000..c73fbd41
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/copywithtimeout/copywithtimeout.go
@@ -0,0 +1,103 @@
+package copywithtimeout
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"syscall"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// logDataByteCount is for an advanced debugging technique to allow
+// data read/written to a process's stdio channels to be hex-dumped to the
+// log when running at debug level or higher.
It is controlled through +// the environment variable HCSSHIM_LOG_DATA_BYTE_COUNT +var logDataByteCount int64 + +func init() { + bytes := os.Getenv("HCSSHIM_LOG_DATA_BYTE_COUNT") + if len(bytes) > 0 { + u, err := strconv.ParseUint(bytes, 10, 32) + if err == nil { + logDataByteCount = int64(u) + } + } +} + +// Copy is a wrapper for io.Copy using a timeout duration +func Copy(dst io.Writer, src io.Reader, size int64, context string, timeout time.Duration) (int64, error) { + logrus.WithFields(logrus.Fields{ + "stdval": context, + "size": size, + "timeout": timeout, + }).Debug("hcsshim::copywithtimeout - Begin") + + type resultType struct { + err error + bytes int64 + } + + done := make(chan resultType, 1) + go func() { + result := resultType{} + if logrus.GetLevel() < logrus.DebugLevel || logDataByteCount == 0 { + result.bytes, result.err = io.Copy(dst, src) + } else { + // In advanced debug mode where we log (hexdump format) what is copied + // up to the number of bytes defined by environment variable + // HCSSHIM_LOG_DATA_BYTE_COUNT + var buf bytes.Buffer + tee := io.TeeReader(src, &buf) + result.bytes, result.err = io.Copy(dst, tee) + if result.err == nil { + size := result.bytes + if size > logDataByteCount { + size = logDataByteCount + } + if size > 0 { + bytes := make([]byte, size) + if _, err := buf.Read(bytes); err == nil { + logrus.Debugf("hcsshim::copyWithTimeout - Read bytes\n%s", hex.Dump(bytes)) + } + } + } + } + done <- result + }() + + var result resultType + timedout := time.After(timeout) + + select { + case <-timedout: + return 0, fmt.Errorf("hcsshim::copyWithTimeout: timed out (%s)", context) + case result = <-done: + if result.err != nil && result.err != io.EOF { + // See https://github.com/golang/go/blob/f3f29d1dea525f48995c1693c609f5e67c046893/src/os/exec/exec_windows.go for a clue as to why we are doing this :) + if se, ok := result.err.(syscall.Errno); ok { + const ( + errNoData = syscall.Errno(232) + errBrokenPipe = syscall.Errno(109) + ) + if se == errNoData || se == errBrokenPipe { + logrus.WithFields(logrus.Fields{ + "stdval": context, + logrus.ErrorKey: se, + }).Debug("hcsshim::copywithtimeout - End") + return result.bytes, nil + } + } + return 0, fmt.Errorf("hcsshim::copyWithTimeout: error reading: '%s' after %d bytes (%s)", result.err, result.bytes, context) + } + } + logrus.WithFields(logrus.Fields{ + "stdval": context, + "copied-bytes": result.bytes, + }).Debug("hcsshim::copywithtimeout - Completed Successfully") + return result.bytes, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go b/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go new file mode 100644 index 00000000..5d3d0dfe --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/guestrequest/types.go @@ -0,0 +1,100 @@ +package guestrequest + +import ( + "github.com/Microsoft/hcsshim/internal/schema2" +) + +// Arguably, many of these (at least CombinedLayers) should have been generated +// by swagger. +// +// This will also change package name due to an inbound breaking change. + +// This class is used by a modify request to add or remove a combined layers +// structure in the guest. For windows, the GCS applies a filter in ContainerRootPath +// using the specified layers as the parent content. Ignores property ScratchPath +// since the container path is already the scratch path. For linux, the GCS unions +// the specified layers and ScratchPath together, placing the resulting union +// filesystem at ContainerRootPath. 
+type CombinedLayers struct { + ContainerRootPath string `json:"ContainerRootPath,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` + ScratchPath string `json:"ScratchPath,omitempty"` +} + +// Defines the schema for hosted settings passed to GCS and/or OpenGCS + +// SCSI. Scratch space for remote file-system commands, or R/W layer for containers +type LCOWMappedVirtualDisk struct { + MountPath string `json:"MountPath,omitempty"` // /tmp/scratch for an LCOW utility VM being used as a service VM + Lun uint8 `json:"Lun,omitempty"` + Controller uint8 `json:"Controller,omitempty"` + ReadOnly bool `json:"ReadOnly,omitempty"` +} + +type WCOWMappedVirtualDisk struct { + ContainerPath string `json:"ContainerPath,omitempty"` + Lun int32 `json:"Lun,omitempty"` +} + +type LCOWMappedDirectory struct { + MountPath string `json:"MountPath,omitempty"` + Port int32 `json:"Port,omitempty"` + ShareName string `json:"ShareName,omitempty"` // If empty not using ANames (not currently supported) + ReadOnly bool `json:"ReadOnly,omitempty"` +} + +// Read-only layers over VPMem +type LCOWMappedVPMemDevice struct { + DeviceNumber uint32 `json:"DeviceNumber,omitempty"` + MountPath string `json:"MountPath,omitempty"` // /tmp/pN +} + +type LCOWNetworkAdapter struct { + NamespaceID string `json:",omitempty"` + ID string `json:",omitempty"` + MacAddress string `json:",omitempty"` + IPAddress string `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + EnableLowMetric bool `json:",omitempty"` + EncapOverhead uint16 `json:",omitempty"` +} + +type ResourceType string + +const ( + // These are constants for v2 schema modify guest requests. + ResourceTypeMappedDirectory ResourceType = "MappedDirectory" + ResourceTypeMappedVirtualDisk ResourceType = "MappedVirtualDisk" + ResourceTypeNetwork ResourceType = "Network" + ResourceTypeNetworkNamespace ResourceType = "NetworkNamespace" + ResourceTypeCombinedLayers ResourceType = "CombinedLayers" + ResourceTypeVPMemDevice ResourceType = "VPMemDevice" +) + +// GuestRequest is for modify commands passed to the guest. +type GuestRequest struct { + RequestType string `json:"RequestType,omitempty"` + ResourceType ResourceType `json:"ResourceType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type NetworkModifyRequest struct { + AdapterId string `json:"AdapterId,omitempty"` + RequestType string `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type RS4NetworkModifyRequest struct { + AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` + RequestType string `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +// SignalProcessOptions is the options passed to either WCOW or LCOW +// to signal a given process. 
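+//
+// Editor's illustrative sketch (not part of the vendored upstream code):
+// internal/hcs.(*Process).Signal accepts these options, e.g.
+//
+//	opts := guestrequest.SignalProcessOptions{
+//		Signal: 15, // assumed here to mean SIGTERM for an LCOW process
+//	}
+//	err := process.Signal(opts)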
+type SignalProcessOptions struct { + Signal int `json:,omitempty` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go b/vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go new file mode 100644 index 00000000..e9e45c03 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/guid/guid.go @@ -0,0 +1,69 @@ +package guid + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +var _ = (json.Marshaler)(&GUID{}) +var _ = (json.Unmarshaler)(&GUID{}) + +type GUID [16]byte + +func New() GUID { + g := GUID{} + _, err := io.ReadFull(rand.Reader, g[:]) + if err != nil { + panic(err) + } + return g +} + +func (g GUID) String() string { + return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:]) +} + +func FromString(s string) GUID { + if len(s) != 36 { + panic(fmt.Sprintf("invalid GUID length: %d", len(s))) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + panic("invalid GUID format") + } + indexOrder := [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } + byteOrder := [16]int{ + 3, 2, 1, 0, + 5, 4, + 7, 6, + 8, 9, + 10, 11, 12, 13, 14, 15, + } + var g GUID + for i, x := range indexOrder { + b, err := strconv.ParseInt(s[x:x+2], 16, 16) + if err != nil { + panic(err) + } + g[byteOrder[i]] = byte(b) + } + return g +} + +func (g GUID) MarshalJSON() ([]byte, error) { + return json.Marshal(g.String()) +} + +func (g *GUID) UnmarshalJSON(data []byte) error { + *g = FromString(strings.Trim(string(data), "\"")) + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/guid/guid_test.go b/vendor/github.com/Microsoft/hcsshim/internal/guid/guid_test.go new file mode 100644 index 00000000..0f9a341c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/guid/guid_test.go @@ -0,0 +1,136 @@ +package guid + +import ( + "encoding/json" + "fmt" + "testing" +) + +func Test_New(t *testing.T) { + g := New() + g2 := New() + if g == g2 { + t.Fatal("GUID's should not be equal when generated") + } +} + +func Test_FromString(t *testing.T) { + g := New() + g2 := FromString(g.String()) + if g != g2 { + t.Fatalf("GUID's not equal %v, %v", g, g2) + } +} + +func Test_MarshalJSON(t *testing.T) { + g := New() + gs := g.String() + js, err := json.Marshal(g) + if err != nil { + t.Fatalf("failed to marshal with %v", err) + } + gsJSON := fmt.Sprintf("\"%s\"", gs) + if gsJSON != string(js) { + t.Fatalf("failed to marshal %s != %s", gsJSON, string(js)) + } +} + +func Test_MarshalJSON_Ptr(t *testing.T) { + g := New() + gs := g.String() + js, err := json.Marshal(&g) + if err != nil { + t.Fatalf("failed to marshal with %v", err) + } + gsJSON := fmt.Sprintf("\"%s\"", gs) + if gsJSON != string(js) { + t.Fatalf("failed to marshal %s != %s", gsJSON, string(js)) + } +} + +func Test_MarshalJSON_Nested(t *testing.T) { + type test struct { + G GUID + } + t1 := test{ + G: New(), + } + gs := t1.G.String() + js, err := json.Marshal(t1) + if err != nil { + t.Fatalf("failed to marshal with %v", err) + } + gsJSON := fmt.Sprintf("{\"G\":\"%s\"}", gs) + if gsJSON != string(js) { + t.Fatalf("failed to marshal %s != %s", gsJSON, string(js)) + } +} + +func Test_MarshalJSON_Nested_Ptr(t *testing.T) { + type test struct { + G *GUID + } + v := New() + t1 := test{ + G: &v, + } + gs := t1.G.String() + js, err := json.Marshal(t1) + if err != nil { + t.Fatalf("failed to marshal with %v", err) + } + gsJSON := fmt.Sprintf("{\"G\":\"%s\"}", gs) + if 
gsJSON != string(js) { + t.Fatalf("failed to marshal %s != %s", gsJSON, string(js)) + } +} + +func Test_UnmarshalJSON(t *testing.T) { + g := New() + js, _ := json.Marshal(g) + var g2 GUID + err := json.Unmarshal(js, &g2) + if err != nil { + t.Fatalf("failed to unmarshal with: %v", err) + } + if g != g2 { + t.Fatalf("failed to unmarshal %s != %s", g, g2) + } +} + +func Test_UnmarshalJSON_Nested(t *testing.T) { + type test struct { + G GUID + } + t1 := test{ + G: New(), + } + js, _ := json.Marshal(t1) + var t2 test + err := json.Unmarshal(js, &t2) + if err != nil { + t.Fatalf("failed to unmarshal with: %v", err) + } + if t1.G != t2.G { + t.Fatalf("failed to unmarshal %v != %v", t1.G, t2.G) + } +} + +func Test_UnmarshalJSON_Nested_Ptr(t *testing.T) { + type test struct { + G *GUID + } + v := New() + t1 := test{ + G: &v, + } + js, _ := json.Marshal(t1) + var t2 test + err := json.Unmarshal(js, &t2) + if err != nil { + t.Fatalf("failed to unmarshal with: %v", err) + } + if *t1.G != *t2.G { + t.Fatalf("failed to unmarshal %v != %v", t1.G, t2.G) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go new file mode 100644 index 00000000..f9a922a4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -0,0 +1,104 @@ +package hcs + +import ( + "sync" + "syscall" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +var ( + nextCallback uintptr + callbackMap = map[uintptr]*notifcationWatcherContext{} + callbackMapLock = sync.RWMutex{} + + notificationWatcherCallback = syscall.NewCallback(notificationWatcher) + + // Notifications for HCS_SYSTEM handles + hcsNotificationSystemExited hcsNotification = 0x00000001 + hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 + hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 + hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 + hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 + hcsNotificationSystemCrashReport hcsNotification = 0x00000006 + hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007 + hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008 + hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009 + hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A + hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B + hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C + hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D + hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E + + // Notifications for HCS_PROCESS handles + hcsNotificationProcessExited hcsNotification = 0x00010000 + + // Common notifications + hcsNotificationInvalid hcsNotification = 0x00000000 + hcsNotificationServiceDisconnect hcsNotification = 0x01000000 +) + +type hcsNotification uint32 +type notificationChannel chan error + +type notifcationWatcherContext struct { + channels notificationChannels + handle hcsCallback +} + +type notificationChannels map[hcsNotification]notificationChannel + +func newChannels() notificationChannels { + channels := make(notificationChannels) + + channels[hcsNotificationSystemExited] = make(notificationChannel, 1) + channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemPauseCompleted] = 
make(notificationChannel, 1) + channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1) + channels[hcsNotificationProcessExited] = make(notificationChannel, 1) + channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1) + channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1) + channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1) + channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1) + channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1) + channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1) + channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1) + channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1) + + return channels +} + +func closeChannels(channels notificationChannels) { + for _, c := range channels { + close(c) + } +} + +func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { + var result error + if int32(notificationStatus) < 0 { + result = interop.Win32FromHresult(notificationStatus) + } + + callbackMapLock.RLock() + context := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if context == nil { + return 0 + } + + if channel, ok := context.channels[notificationType]; ok { + channel <- result + } else { + logrus.WithFields(logrus.Fields{ + "notification-type": notificationType, + }).Warn("Received a callback of an unsupported type") + } + + return 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go new file mode 100644 index 00000000..3669c34a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/cgo.go @@ -0,0 +1,7 @@ +package hcs + +import "C" + +// This import is needed to make the library compile as CGO because HCSSHIM +// only works with CGO due to callbacks from HCS comming back from a C thread +// which is not supported without CGO. 
See https://github.com/golang/go/issues/10973 diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go new file mode 100644 index 00000000..079b5653 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -0,0 +1,287 @@ +package hcs + +import ( + "encoding/json" + "errors" + "fmt" + "syscall" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/sirupsen/logrus" +) + +var ( + // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists + ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrElementNotFound = syscall.Errno(0x490) + + // ErrNotSupported is an error encountered when the requested operation is not supported + ErrNotSupported = syscall.Errno(0x32) + + // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported + // decimal -2147024883 / hex 0x8007000d + ErrInvalidData = syscall.Errno(0xd) + + // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed + ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") + + // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method + ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed") + + // ErrInvalidNotificationType is an error encountered when an invalid notification type is used + ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type") + + // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation + ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") + + // ErrTimeout is an error encountered when waiting on a notification times out + ErrTimeout = errors.New("hcsshim: timeout waiting for notification") + + // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for + // a different expected notification + ErrUnexpectedContainerExit = errors.New("unexpected container exit") + + // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service + // is lost while waiting for a notification + ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") + + // ErrUnexpectedValue is an error encountered when hcs returns an invalid value + ErrUnexpectedValue = errors.New("unexpected value returned from hcs") + + // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container + ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) + + // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously + ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) + + // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation + ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) + + // ErrProcNotFound is an error encountered when the process cannot be found + ErrProcNotFound = syscall.Errno(0x7f) + + // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 +
// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. + ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5) + + // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management + ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d) + + // ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message + ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b) + + // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly + ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106) + + // ErrPlatformNotSupported is an error encountered when hcs doesn't support the request + ErrPlatformNotSupported = errors.New("unsupported platform request") +) + +type ErrorEvent struct { + Message string `json:"Message,omitempty"` // Fully formatted error message + StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form + Provider string `json:"Provider,omitempty"` + EventID uint16 `json:"EventId,omitempty"` + Flags uint32 `json:"Flags,omitempty"` + Source string `json:"Source,omitempty"` + //Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function) +} + +type hcsResult struct { + Error int32 + ErrorMessage string + ErrorEvents []ErrorEvent `json:"ErrorEvents,omitempty"` +} + +func (ev *ErrorEvent) String() string { + evs := "[Event Detail: " + ev.Message + if ev.StackTrace != "" { + evs += " Stack Trace: " + ev.StackTrace + } + if ev.Provider != "" { + evs += " Provider: " + ev.Provider + } + if ev.EventID != 0 { + evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID) + } + if ev.Flags != 0 { + evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags) + } + if ev.Source != "" { + evs += " Source: " + ev.Source + } + evs += "]" + return evs +} + +func processHcsResult(resultp *uint16) []ErrorEvent { + if resultp != nil { + resultj := interop.ConvertAndFreeCoTaskMemString(resultp) + logrus.WithField(logfields.JSON, resultj).
+ Debug("HCS Result") + result := &hcsResult{} + if err := json.Unmarshal([]byte(resultj), result); err != nil { + logrus.WithFields(logrus.Fields{ + logfields.JSON: resultj, + logrus.ErrorKey: err, + }).Warning("Could not unmarshal HCS result") + return nil + } + return result.ErrorEvents + } + return nil +} + +type HcsError struct { + Op string + Err error + Events []ErrorEvent +} + +func (e *HcsError) Error() string { + s := e.Op + ": " + e.Err.Error() + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +// ProcessError is an error encountered in HCS during an operation on a Process object +type ProcessError struct { + SystemID string + Pid int + Op string + Err error + Events []ErrorEvent +} + +// SystemError is an error encountered in HCS during an operation on a Container object +type SystemError struct { + ID string + Op string + Err error + Extra string + Events []ErrorEvent +} + +func (e *SystemError) Error() string { + s := e.Op + " " + e.ID + ": " + e.Err.Error() + for _, ev := range e.Events { + s += "\n" + ev.String() + } + if e.Extra != "" { + s += "\n(extra info: " + e.Extra + ")" + } + return s +} + +func makeSystemError(system *System, op string, extra string, err error, events []ErrorEvent) error { + // Don't double wrap errors + if _, ok := err.(*SystemError); ok { + return err + } + return &SystemError{ + ID: system.ID(), + Op: op, + Extra: extra, + Err: err, + Events: events, + } +} + +func (e *ProcessError) Error() string { + s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error()) + for _, ev := range e.Events { + s += "\n" + ev.String() + } + return s +} + +func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error { + // Don't double wrap errors + if _, ok := err.(*ProcessError); ok { + return err + } + return &ProcessError{ + Pid: process.Pid(), + SystemID: process.SystemID(), + Op: op, + Err: err, + Events: events, + } +} + +// IsNotExist checks if an error is caused by the Container or Process not existing. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. +func IsNotExist(err error) bool { + err = getInnerError(err) + return err == ErrComputeSystemDoesNotExist || + err == ErrElementNotFound || + err == ErrProcNotFound +} + +// IsAlreadyClosed checks if an error is caused by the Container or Process having been +// already closed by a call to the Close() method. +func IsAlreadyClosed(err error) bool { + err = getInnerError(err) + return err == ErrAlreadyClosed +} + +// IsPending returns a boolean indicating whether the error is that +// the requested operation is being completed in the background. +func IsPending(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeOperationPending +} + +// IsTimeout returns a boolean indicating whether the error is caused by +// a timeout waiting for the operation to complete. +func IsTimeout(err error) bool { + err = getInnerError(err) + return err == ErrTimeout +} + +// IsAlreadyStopped returns a boolean indicating whether the error is caused by +// a Container or Process being already stopped. +// Note: Currently, ErrElementNotFound can mean that a Process has either +// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist +// will currently return true when the error is ErrElementNotFound or ErrProcNotFound. 
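+//
+// Editor's illustrative sketch (not part of the vendored upstream code):
+// callers typically use these predicates to tolerate benign failures, e.g.
+//
+//	if err := computeSystem.Terminate(); err != nil &&
+//		!IsPending(err) && !IsAlreadyStopped(err) {
+//		return err
+//	}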
+func IsAlreadyStopped(err error) bool { + err = getInnerError(err) + return err == ErrVmcomputeAlreadyStopped || + err == ErrElementNotFound || + err == ErrProcNotFound +} + +// IsNotSupported returns a boolean indicating whether the error is caused by +// unsupported platform requests +// Note: Currently Unsupported platform requests can be mean either +// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage +// is thrown from the Platform +func IsNotSupported(err error) bool { + err = getInnerError(err) + // If Platform doesn't recognize or support the request sent, below errors are seen + return err == ErrVmcomputeInvalidJSON || + err == ErrInvalidData || + err == ErrNotSupported || + err == ErrVmcomputeUnknownMessage +} + +func getInnerError(err error) error { + switch pe := err.(type) { + case nil: + return nil + case *HcsError: + err = pe.Err + case *SystemError: + err = pe.Err + case *ProcessError: + err = pe.Err + } + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go new file mode 100644 index 00000000..b0d49cbc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/hcs.go @@ -0,0 +1,48 @@ +// Shim for the Host Compute Service (HCS) to manage Windows Server +// containers and Hyper-V containers. + +package hcs + +import ( + "syscall" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hcs.go + +//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? +//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? +//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? +//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? +//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? +//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? +//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? +//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? +//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? +//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? +//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? +//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? + +//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? 
+//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? +//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess? +//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? +//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? +//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? +//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? +//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? +//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? + +type hcsSystem syscall.Handle +type hcsProcess syscall.Handle +type hcsCallback syscall.Handle + +type hcsProcessInformation struct { + ProcessId uint32 + Reserved uint32 + StdInput syscall.Handle + StdOutput syscall.Handle + StdError syscall.Handle +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go new file mode 100644 index 00000000..6d03b17a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go @@ -0,0 +1,20 @@ +package hcs + +import "github.com/sirupsen/logrus" + +func logOperationBegin(ctx logrus.Fields, msg string) { + logrus.WithFields(ctx).Debug(msg) +} + +func logOperationEnd(ctx logrus.Fields, msg string, err error) { + // Copy the log and fields first. + log := logrus.WithFields(ctx) + if err == nil { + log.Debug(msg) + } else { + // Edit only the copied field data to avoid race conditions on the + // write. 
+ log.Data[logrus.ErrorKey] = err + log.Error(msg) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go new file mode 100644 index 00000000..41e20bbf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -0,0 +1,459 @@ +package hcs + +import ( + "encoding/json" + "io" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/sirupsen/logrus" +) + +// ContainerError is an error encountered in HCS +type Process struct { + handleLock sync.RWMutex + handle hcsProcess + processID int + system *System + cachedPipes *cachedPipes + callbackNumber uintptr + + logctx logrus.Fields +} + +func newProcess(process hcsProcess, processID int, computeSystem *System) *Process { + return &Process{ + handle: process, + processID: processID, + system: computeSystem, + logctx: logrus.Fields{ + logfields.ContainerID: computeSystem.ID(), + logfields.ProcessID: processID, + }, + } +} + +type cachedPipes struct { + stdIn syscall.Handle + stdOut syscall.Handle + stdErr syscall.Handle +} + +type processModifyRequest struct { + Operation string + ConsoleSize *consoleSize `json:",omitempty"` + CloseHandle *closeHandle `json:",omitempty"` +} + +type consoleSize struct { + Height uint16 + Width uint16 +} + +type closeHandle struct { + Handle string +} + +type ProcessStatus struct { + ProcessID uint32 + Exited bool + ExitCode uint32 + LastWaitResult int32 +} + +const ( + stdIn string = "StdIn" + stdOut string = "StdOut" + stdErr string = "StdErr" +) + +const ( + modifyConsoleSize string = "ConsoleSize" + modifyCloseHandle string = "CloseHandle" +) + +// Pid returns the process ID of the process within the container. +func (process *Process) Pid() int { + return process.processID +} + +// SystemID returns the ID of the process's compute system. +func (process *Process) SystemID() string { + return process.system.ID() +} + +func (process *Process) logOperationBegin(operation string) { + logOperationBegin( + process.logctx, + operation+" - Begin Operation") +} + +func (process *Process) logOperationEnd(operation string, err error) { + var result string + if err == nil { + result = "Success" + } else { + result = "Error" + } + + logOperationEnd( + process.logctx, + operation+" - End Operation - "+result, + err) +} + +// Signal signals the process with `options`. +func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcsshim::Process::Signal" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + optionsb, err := json.Marshal(options) + if err != nil { + return err + } + + optionsStr := string(optionsb) + + var resultp *uint16 + syscallWatcher(process.logctx, func() { + err = hcsSignalProcess(process.handle, optionsStr, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + return nil +} + +// Kill signals the process to terminate but does not wait for it to finish terminating. 
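+//
+// Editor's illustrative sketch (not part of the vendored upstream code): a
+// best-effort stop can combine Kill, Wait and IsAlreadyStopped, e.g.
+//
+//	if err := process.Kill(); err != nil && !IsAlreadyStopped(err) {
+//		return err
+//	}
+//	return process.Wait()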
+func (process *Process) Kill() (err error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcsshim::Process::Kill" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + var resultp *uint16 + syscallWatcher(process.logctx, func() { + err = hcsTerminateProcess(process.handle, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + return nil +} + +// Wait waits for the process to exit. +func (process *Process) Wait() (err error) { + operation := "hcsshim::Process::Wait" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil) + if err != nil { + return makeProcessError(process, operation, err, nil) + } + + return nil +} + +// WaitTimeout waits for the process to exit or the duration to elapse. It returns +// false if timeout occurs. +func (process *Process) WaitTimeout(timeout time.Duration) (err error) { + operation := "hcssshim::Process::WaitTimeout" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout) + if err != nil { + return makeProcessError(process, operation, err, nil) + } + + return nil +} + +// ResizeConsole resizes the console of the process. +func (process *Process) ResizeConsole(width, height uint16) (err error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcsshim::Process::ResizeConsole" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + modifyRequest := processModifyRequest{ + Operation: modifyConsoleSize, + ConsoleSize: &consoleSize{ + Height: height, + Width: width, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + modifyRequestStr := string(modifyRequestb) + + var resultp *uint16 + err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp) + events := processHcsResult(resultp) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + return nil +} + +func (process *Process) Properties() (_ *ProcessStatus, err error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcsshim::Process::Properties" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + if process.handle == 0 { + return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + var ( + resultp *uint16 + propertiesp *uint16 + ) + syscallWatcher(process.logctx, func() { + err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return nil, makeProcessError(process, operation, err, events) + } + + if propertiesp == nil { + return nil, ErrUnexpectedValue + } + propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp) + + properties := &ProcessStatus{} + if err := json.Unmarshal(propertiesRaw, properties); err != nil { + return nil, makeProcessError(process, operation, err, nil) + } + + return properties, nil +} + +// 
ExitCode returns the exit code of the process. The process must have +// already terminated. +func (process *Process) ExitCode() (_ int, err error) { + operation := "hcsshim::Process::ExitCode" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + properties, err := process.Properties() + if err != nil { + return 0, makeProcessError(process, operation, err, nil) + } + + if properties.Exited == false { + return 0, makeProcessError(process, operation, ErrInvalidProcessState, nil) + } + + if properties.LastWaitResult != 0 { + return 0, makeProcessError(process, operation, syscall.Errno(properties.LastWaitResult), nil) + } + + return int(properties.ExitCode), nil +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes; it should be possible to +// call this multiple times to get multiple interfaces. +func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcsshim::Process::Stdio" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + if process.handle == 0 { + return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + var stdIn, stdOut, stdErr syscall.Handle + + if process.cachedPipes == nil { + var ( + processInfo hcsProcessInformation + resultp *uint16 + ) + err = hcsGetProcessInfo(process.handle, &processInfo, &resultp) + events := processHcsResult(resultp) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, err, events) + } + + stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError + } else { + // Use cached pipes + stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr + + // Invalidate the cache + process.cachedPipes = nil + } + + pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr}) + if err != nil { + return nil, nil, nil, makeProcessError(process, operation, err, nil) + } + + return pipes[0], pipes[1], pipes[2], nil +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. +func (process *Process) CloseStdin() (err error) { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcsshim::Process::CloseStdin" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + modifyRequest := processModifyRequest{ + Operation: modifyCloseHandle, + CloseHandle: &closeHandle{ + Handle: stdIn, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + modifyRequestStr := string(modifyRequestb) + + var resultp *uint16 + err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp) + events := processHcsResult(resultp) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + return nil +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. 
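+//
+// Editor's illustrative sketch (not part of the vendored upstream code): a
+// typical caller releases the handle once it is done with the process;
+// processConfig stands for whatever process document the caller builds:
+//
+//	p, err := computeSystem.CreateProcess(processConfig)
+//	if err != nil {
+//		return err
+//	}
+//	defer p.Close()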
+func (process *Process) Close() (err error) { + process.handleLock.Lock() + defer process.handleLock.Unlock() + + operation := "hcsshim::Process::Close" + process.logOperationBegin(operation) + defer func() { process.logOperationEnd(operation, err) }() + + // Don't double free this + if process.handle == 0 { + return nil + } + + if err = process.unregisterCallback(); err != nil { + return makeProcessError(process, operation, err, nil) + } + + if err = hcsCloseProcess(process.handle); err != nil { + return makeProcessError(process, operation, err, nil) + } + + process.handle = 0 + + return nil +} + +func (process *Process) registerCallback() error { + context := ¬ifcationWatcherContext{ + channels: newChannels(), + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = context + callbackMapLock.Unlock() + + var callbackHandle hcsCallback + err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle) + if err != nil { + return err + } + context.handle = callbackHandle + process.callbackNumber = callbackNumber + + return nil +} + +func (process *Process) unregisterCallback() error { + callbackNumber := process.callbackNumber + + callbackMapLock.RLock() + context := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if context == nil { + return nil + } + + handle := context.handle + + if handle == 0 { + return nil + } + + // hcsUnregisterProcessCallback has its own syncronization + // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. + err := hcsUnregisterProcessCallback(handle) + if err != nil { + return err + } + + closeChannels(context.channels) + + callbackMapLock.Lock() + callbackMap[callbackNumber] = nil + callbackMapLock.Unlock() + + handle = 0 + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go new file mode 100644 index 00000000..20b24252 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -0,0 +1,685 @@ +package hcs + +import ( + "encoding/json" + "os" + "strconv" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/internal/timeout" + "github.com/sirupsen/logrus" +) + +// currentContainerStarts is used to limit the number of concurrent container +// starts. 
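+// As an editor's note (illustrative, not upstream documentation): setting
+// HCSSHIM_MAX_PARALLEL_START=2 in the environment of the process embedding
+// this package limits it to two concurrent start calls, while an unset,
+// zero, or invalid value leaves starts unlimited (see init and Start below).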
+var currentContainerStarts containerStarts + +type containerStarts struct { + maxParallel int + inProgress int + sync.Mutex +} + +func init() { + mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START") + if len(mpsS) > 0 { + mpsI, err := strconv.Atoi(mpsS) + if err != nil || mpsI < 0 { + return + } + currentContainerStarts.maxParallel = mpsI + } +} + +type System struct { + handleLock sync.RWMutex + handle hcsSystem + id string + callbackNumber uintptr + + logctx logrus.Fields +} + +func newSystem(id string) *System { + return &System{ + id: id, + logctx: logrus.Fields{ + logfields.ContainerID: id, + }, + } +} + +func (computeSystem *System) logOperationBegin(operation string) { + logOperationBegin( + computeSystem.logctx, + operation+" - Begin Operation") +} + +func (computeSystem *System) logOperationEnd(operation string, err error) { + var result string + if err == nil { + result = "Success" + } else { + result = "Error" + } + + logOperationEnd( + computeSystem.logctx, + operation+" - End Operation - "+result, + err) +} + +// CreateComputeSystem creates a new compute system with the given configuration but does not start it. +func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System, err error) { + operation := "hcsshim::CreateComputeSystem" + + computeSystem := newSystem(id) + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + hcsDocumentB, err := json.Marshal(hcsDocumentInterface) + if err != nil { + return nil, err + } + + hcsDocument := string(hcsDocumentB) + + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, hcsDocument). + Debug("HCS ComputeSystem Document") + + var ( + resultp *uint16 + identity syscall.Handle + createError error + ) + syscallWatcher(computeSystem.logctx, func() { + createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp) + }) + + if createError == nil || IsPending(createError) { + if err = computeSystem.registerCallback(); err != nil { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + computeSystem.Terminate() + return nil, makeSystemError(computeSystem, operation, "", err, nil) + } + } + + events, err := processAsyncHcsResult(createError, resultp, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) + if err != nil { + if err == ErrTimeout { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + computeSystem.Terminate() + } + return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events) + } + + return computeSystem, nil +} + +// OpenComputeSystem opens an existing compute system by ID. 
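+//
+// Editor's illustrative sketch (not part of the vendored upstream code):
+//
+//	cs, err := OpenComputeSystem(containerID)
+//	if err != nil {
+//		return err // IsNotExist(err) reports that no such system exists
+//	}
+//	defer cs.Close()
+//	props, err := cs.Properties()
+//	// ... inspect props as needed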
+func OpenComputeSystem(id string) (_ *System, err error) { + operation := "hcsshim::OpenComputeSystem" + + computeSystem := newSystem(id) + computeSystem.logOperationBegin(operation) + defer func() { + if IsNotExist(err) { + computeSystem.logOperationEnd(operation, nil) + } else { + computeSystem.logOperationEnd(operation, err) + } + }() + + var ( + handle hcsSystem + resultp *uint16 + ) + err = hcsOpenComputeSystem(id, &handle, &resultp) + events := processHcsResult(resultp) + if err != nil { + return nil, makeSystemError(computeSystem, operation, "", err, events) + } + + computeSystem.handle = handle + + if err = computeSystem.registerCallback(); err != nil { + return nil, makeSystemError(computeSystem, operation, "", err, nil) + } + + return computeSystem, nil +} + +// GetComputeSystems gets a list of the compute systems on the system that match the query +func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) { + operation := "hcsshim::GetComputeSystems" + fields := logrus.Fields{} + logOperationBegin( + fields, + operation+" - Begin Operation") + + defer func() { + var result string + if err == nil { + result = "Success" + } else { + result = "Error" + } + + logOperationEnd( + fields, + operation+" - End Operation - "+result, + err) + }() + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + + query := string(queryb) + + logrus.WithFields(fields). + WithField(logfields.JSON, query). + Debug("HCS ComputeSystem Query") + + var ( + resultp *uint16 + computeSystemsp *uint16 + ) + + syscallWatcher(fields, func() { + err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if computeSystemsp == nil { + return nil, ErrUnexpectedValue + } + computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp) + computeSystems := []schema1.ContainerProperties{} + if err = json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil { + return nil, err + } + + return computeSystems, nil +} + +// Start synchronously starts the computeSystem. +func (computeSystem *System) Start() (err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::Start" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil) + } + + // This is a very simple backoff-retry loop to limit the number + // of parallel container starts if environment variable + // HCSSHIM_MAX_PARALLEL_START is set to a positive integer. + // It should generally only be used as a workaround to various + // platform issues that exist between RS1 and RS4 as of Aug 2018 + if currentContainerStarts.maxParallel > 0 { + for { + currentContainerStarts.Lock() + if currentContainerStarts.inProgress < currentContainerStarts.maxParallel { + currentContainerStarts.inProgress++ + currentContainerStarts.Unlock() + break + } + if currentContainerStarts.inProgress == currentContainerStarts.maxParallel { + currentContainerStarts.Unlock() + time.Sleep(100 * time.Millisecond) + } + } + // Make sure we decrement the count when we are done. 
+ defer func() { + currentContainerStarts.Lock() + currentContainerStarts.inProgress-- + currentContainerStarts.Unlock() + }() + } + + var resultp *uint16 + syscallWatcher(computeSystem.logctx, func() { + err = hcsStartComputeSystem(computeSystem.handle, "", &resultp) + }) + events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) + if err != nil { + return makeSystemError(computeSystem, "Start", "", err, events) + } + + return nil +} + +// ID returns the compute system's identifier. +func (computeSystem *System) ID() string { + return computeSystem.id +} + +// Shutdown requests a compute system shutdown, if IsPending() on the error returned is true, +// it may not actually be shut down until Wait() succeeds. +func (computeSystem *System) Shutdown() (err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::Shutdown" + computeSystem.logOperationBegin(operation) + defer func() { + if IsAlreadyStopped(err) { + computeSystem.logOperationEnd(operation, nil) + } else { + computeSystem.logOperationEnd(operation, err) + } + }() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil) + } + + var resultp *uint16 + syscallWatcher(computeSystem.logctx, func() { + err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return makeSystemError(computeSystem, "Shutdown", "", err, events) + } + + return nil +} + +// Terminate requests a compute system terminate, if IsPending() on the error returned is true, +// it may not actually be shut down until Wait() succeeds. +func (computeSystem *System) Terminate() (err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::Terminate" + computeSystem.logOperationBegin(operation) + defer func() { + if IsPending(err) { + computeSystem.logOperationEnd(operation, nil) + } else { + computeSystem.logOperationEnd(operation, err) + } + }() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil) + } + + var resultp *uint16 + syscallWatcher(computeSystem.logctx, func() { + err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp) + }) + events := processHcsResult(resultp) + if err != nil && err != ErrVmcomputeAlreadyStopped { + return makeSystemError(computeSystem, "Terminate", "", err, events) + } + + return nil +} + +// Wait synchronously waits for the compute system to shutdown or terminate. +func (computeSystem *System) Wait() (err error) { + operation := "hcsshim::ComputeSystem::Wait" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + if err != nil { + return makeSystemError(computeSystem, "Wait", "", err, nil) + } + + return nil +} + +// WaitExpectedError synchronously waits for the compute system to shutdown or +// terminate, and ignores the passed error if it occurs. 
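+//
+// Editor's illustrative sketch (not part of the vendored upstream code); the
+// choice of expected error is an assumption, not mandated by this file:
+//
+//	_ = computeSystem.Terminate()
+//	_ = computeSystem.WaitExpectedError(ErrVmcomputeUnexpectedExit)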
+func (computeSystem *System) WaitExpectedError(expected error) (err error) { + operation := "hcsshim::ComputeSystem::WaitExpectedError" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + if err != nil && getInnerError(err) != expected { + return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil) + } + + return nil +} + +// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse. +// If the timeout expires, IsTimeout(err) == true +func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) { + operation := "hcsshim::ComputeSystem::WaitTimeout" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout) + if err != nil { + return makeSystemError(computeSystem, "WaitTimeout", "", err, nil) + } + + return nil +} + +func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::Properties" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + queryj, err := json.Marshal(schema1.PropertyQuery{types}) + if err != nil { + return nil, makeSystemError(computeSystem, "Properties", "", err, nil) + } + + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, queryj). + Debug("HCS ComputeSystem Properties Query") + + var resultp, propertiesp *uint16 + syscallWatcher(computeSystem.logctx, func() { + err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return nil, makeSystemError(computeSystem, "Properties", "", err, events) + } + + if propertiesp == nil { + return nil, ErrUnexpectedValue + } + propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp) + properties := &schema1.ContainerProperties{} + if err := json.Unmarshal(propertiesRaw, properties); err != nil { + return nil, makeSystemError(computeSystem, "Properties", "", err, nil) + } + + return properties, nil +} + +// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. +func (computeSystem *System) Pause() (err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::Pause" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil) + } + + var resultp *uint16 + syscallWatcher(computeSystem.logctx, func() { + err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp) + }) + events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) + if err != nil { + return makeSystemError(computeSystem, "Pause", "", err, events) + } + + return nil +} + +// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. 
+func (computeSystem *System) Resume() (err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::Resume" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil) + } + + var resultp *uint16 + syscallWatcher(computeSystem.logctx, func() { + err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp) + }) + events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) + if err != nil { + return makeSystemError(computeSystem, "Resume", "", err, events) + } + + return nil +} + +// CreateProcess launches a new process within the computeSystem. +func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::CreateProcess" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + var ( + processInfo hcsProcessInformation + processHandle hcsProcess + resultp *uint16 + ) + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, "CreateProcess", "", ErrAlreadyClosed, nil) + } + + configurationb, err := json.Marshal(c) + if err != nil { + return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil) + } + + configuration := string(configurationb) + + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, configuration). + Debug("HCS ComputeSystem Process Document") + + syscallWatcher(computeSystem.logctx, func() { + err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events) + } + + logrus.WithFields(computeSystem.logctx). + WithField(logfields.ProcessID, processInfo.ProcessId). + Debug("HCS ComputeSystem CreateProcess PID") + + process := newProcess(processHandle, int(processInfo.ProcessId), computeSystem) + process.cachedPipes = &cachedPipes{ + stdIn: processInfo.StdInput, + stdOut: processInfo.StdOutput, + stdErr: processInfo.StdError, + } + + if err = process.registerCallback(); err != nil { + return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil) + } + + return process, nil +} + +// OpenProcess gets an interface to an existing process within the computeSystem. 
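+//
+// Editor's illustrative sketch (not part of the vendored upstream code):
+//
+//	p, err := computeSystem.OpenProcess(pid)
+//	if err != nil {
+//		return err
+//	}
+//	defer p.Close()
+//	if err := p.Wait(); err != nil {
+//		return err
+//	}
+//	code, err := p.ExitCode()
+//	// ... report code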
+func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + // Add PID for the context of this operation + computeSystem.logctx[logfields.ProcessID] = pid + defer delete(computeSystem.logctx, logfields.ProcessID) + + operation := "hcsshim::ComputeSystem::OpenProcess" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + var ( + processHandle hcsProcess + resultp *uint16 + ) + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil) + } + + syscallWatcher(computeSystem.logctx, func() { + err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events) + } + + process := newProcess(processHandle, pid, computeSystem) + if err = process.registerCallback(); err != nil { + return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil) + } + + return process, nil +} + +// Close cleans up any state associated with the compute system but does not terminate or wait for it. +func (computeSystem *System) Close() (err error) { + computeSystem.handleLock.Lock() + defer computeSystem.handleLock.Unlock() + + operation := "hcsshim::ComputeSystem::Close" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + // Don't double free this + if computeSystem.handle == 0 { + return nil + } + + if err = computeSystem.unregisterCallback(); err != nil { + return makeSystemError(computeSystem, "Close", "", err, nil) + } + + syscallWatcher(computeSystem.logctx, func() { + err = hcsCloseComputeSystem(computeSystem.handle) + }) + if err != nil { + return makeSystemError(computeSystem, "Close", "", err, nil) + } + + computeSystem.handle = 0 + + return nil +} + +func (computeSystem *System) registerCallback() error { + context := ¬ifcationWatcherContext{ + channels: newChannels(), + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = context + callbackMapLock.Unlock() + + var callbackHandle hcsCallback + err := hcsRegisterComputeSystemCallback(computeSystem.handle, notificationWatcherCallback, callbackNumber, &callbackHandle) + if err != nil { + return err + } + context.handle = callbackHandle + computeSystem.callbackNumber = callbackNumber + + return nil +} + +func (computeSystem *System) unregisterCallback() error { + callbackNumber := computeSystem.callbackNumber + + callbackMapLock.RLock() + context := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if context == nil { + return nil + } + + handle := context.handle + + if handle == 0 { + return nil + } + + // hcsUnregisterComputeSystemCallback has its own syncronization + // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
+ err := hcsUnregisterComputeSystemCallback(handle) + if err != nil { + return err + } + + closeChannels(context.channels) + + callbackMapLock.Lock() + callbackMap[callbackNumber] = nil + callbackMapLock.Unlock() + + handle = 0 + + return nil +} + +// Modify the System by sending a request to HCS +func (computeSystem *System) Modify(config interface{}) (err error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcsshim::ComputeSystem::Modify" + computeSystem.logOperationBegin(operation) + defer func() { computeSystem.logOperationEnd(operation, err) }() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil) + } + + requestJSON, err := json.Marshal(config) + if err != nil { + return err + } + + requestString := string(requestJSON) + + logrus.WithFields(computeSystem.logctx). + WithField(logfields.JSON, requestString). + Debug("HCS ComputeSystem Modify Document") + + var resultp *uint16 + syscallWatcher(computeSystem.logctx, func() { + err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp) + }) + events := processHcsResult(resultp) + if err != nil { + return makeSystemError(computeSystem, "Modify", requestString, err, events) + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go new file mode 100644 index 00000000..a638677e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -0,0 +1,33 @@ +package hcs + +import ( + "io" + "syscall" + + "github.com/Microsoft/go-winio" +) + +// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles +// if there is an error. +func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { + fs := make([]io.ReadWriteCloser, len(hs)) + for i, h := range hs { + if h != syscall.Handle(0) { + if err == nil { + fs[i], err = winio.MakeOpenFile(h) + } + if err != nil { + syscall.Close(h) + } + } + } + if err != nil { + for _, f := range fs { + if f != nil { + f.Close() + } + } + return nil, err + } + return fs, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go new file mode 100644 index 00000000..91e212c5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -0,0 +1,63 @@ +package hcs + +import ( + "time" + + "github.com/sirupsen/logrus" +) + +func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { + events := processHcsResult(resultp) + if IsPending(err) { + return nil, waitForNotification(callbackNumber, expectedNotification, timeout) + } + + return events, err +} + +func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { + callbackMapLock.RLock() + channels := callbackMap[callbackNumber].channels + callbackMapLock.RUnlock() + + expectedChannel := channels[expectedNotification] + if expectedChannel == nil { + logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification) + return ErrInvalidNotificationType + } + + var c <-chan time.Time + if timeout != nil { + timer := time.NewTimer(*timeout) + c = timer.C + defer timer.Stop() + } + + select { + case err, ok := <-expectedChannel: + if !ok { + return ErrHandleClose + } + return err + case err, 
ok := <-channels[hcsNotificationSystemExited]: + if !ok { + return ErrHandleClose + } + // If the expected notification is hcsNotificationSystemExited which of the two selects + // chosen is random. Return the raw error if hcsNotificationSystemExited is expected + if channels[hcsNotificationSystemExited] == expectedChannel { + return err + } + return ErrUnexpectedContainerExit + case _, ok := <-channels[hcsNotificationServiceDisconnect]: + if !ok { + return ErrHandleClose + } + // hcsNotificationServiceDisconnect should never be an expected notification + // it does not need the same handling as hcsNotificationSystemExited + return ErrUnexpectedProcessAbort + case <-c: + return ErrTimeout + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go new file mode 100644 index 00000000..f85ed318 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/watcher.go @@ -0,0 +1,41 @@ +package hcs + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/timeout" + "github.com/sirupsen/logrus" +) + +// syscallWatcher is used as a very simple goroutine around calls into +// the platform. In some cases, we have seen HCS APIs not returning due to +// various bugs, and the goroutine making the syscall ends up not returning, +// prior to its async callback. By spinning up a syscallWatcher, it allows +// us to at least log a warning if a syscall doesn't complete in a reasonable +// amount of time. +// +// Usage is: +// +// syscallWatcher(logContext, func() { +// err = (args...) +// }) +// + +func syscallWatcher(logContext logrus.Fields, syscallLambda func()) { + ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher) + defer cancel() + go watchFunc(ctx, logContext) + syscallLambda() +} + +func watchFunc(ctx context.Context, logContext logrus.Fields) { + select { + case <-ctx.Done(): + if ctx.Err() != context.Canceled { + logrus.WithFields(logContext). + WithField(logfields.Timeout, timeout.SyscallWatcher). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") + } + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go new file mode 100644 index 00000000..fcd5cdc8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/zsyscall_windows.go @@ -0,0 +1,533 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hcs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
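The timeout handling in waitForNotification above leans on a standard Go idiom: receiving from a nil channel blocks forever, so the timer and its channel are only created when a timeout was actually requested. A standalone sketch of the same pattern, with illustrative names:

func waitWithOptionalTimeout(done <-chan error, timeout *time.Duration) error {
	var expired <-chan time.Time
	if timeout != nil {
		t := time.NewTimer(*timeout)
		defer t.Stop()
		expired = t.C // stays nil when no timeout was requested, so that case never fires
	}
	select {
	case err := <-done:
		return err
	case <-expired:
		return ErrTimeout
	}
}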
+ return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") + procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") + procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") + procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") + procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") + procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") + procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") + procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") + procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") + procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") + procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") + procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") + procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") + procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") + procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") +) + +func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcsEnumerateComputeSystems(_p0, computeSystems, result) +} + +func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { + if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) +} + +func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { + if hr = procHcsCreateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 
0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _hcsOpenComputeSystem(_p0, computeSystem, result) +} + +func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16) (hr error) { + if hr = procHcsOpenComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) { + if hr = procHcsCloseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsStartComputeSystem(computeSystem, _p0, result) +} + +func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsStartComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsShutdownComputeSystem(computeSystem, _p0, result) +} + +func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsShutdownComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsTerminateComputeSystem(computeSystem, _p0, result) +} + +func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsTerminateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsPauseComputeSystem(computeSystem, _p0, result) +} + +func 
_hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsPauseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsResumeComputeSystem(computeSystem, _p0, result) +} + +func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsResumeComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) +} + +func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsModifyComputeSystem(computeSystem, _p0, result) +} + +func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, result **uint16) (hr error) { + if hr = procHcsModifyComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) { + if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) { + if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) 
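Every generated wrapper in this file repeats the same result handling, including the check that immediately follows this call: a negative return value is an HRESULT, and when its facility is FACILITY_WIN32 (0x0007) the low 16 bits carry the original Win32 error code, so masking them out lets the value compare equal to the matching syscall.Errno. The inline logic is equivalent to this small helper (illustrative only; the generated code keeps it inline):

func hresultToErrno(r0 uintptr) error {
	if int32(r0) >= 0 {
		return nil // S_OK or another success code
	}
	if r0&0x1fff0000 == 0x00070000 { // FACILITY_WIN32
		r0 &= 0xffff // keep only the Win32 error code
	}
	return syscall.Errno(r0)
}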
+ if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(processParameters) + if hr != nil { + return + } + return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) +} + +func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { + if hr = procHcsCreateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) { + if hr = procHcsOpenProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseProcess(process hcsProcess) (hr error) { + if hr = procHcsCloseProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) { + if hr = procHcsTerminateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSignalProcess(process, _p0, result) +} + +func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr error) { + if hr = procHcsTerminateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) { + if hr = procHcsGetProcessInfo.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) { + if hr = procHcsGetProcessProperties.Find(); hr != nil { + return + } + r0, _, _ 
:= syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyProcess(process, _p0, result) +} + +func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetServiceProperties(_p0, properties, result) +} + +func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcsGetServiceProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) { + if hr = procHcsRegisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) { + if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go new file mode 100644 index 00000000..921c2c85 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go @@ -0,0 +1,47 @@ +package hcserror + +import ( + "fmt" + "syscall" +) + +const ERROR_GEN_FAILURE = syscall.Errno(31) + +type HcsError struct { + title string + rest string + Err error +} + +func (e *HcsError) Error() string { + s := e.title + if len(s) > 0 && s[len(s)-1] != ' ' { + s += " " + } + s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) + if e.rest != "" { + if e.rest[0] != ' ' { + s += " " + } + s += e.rest + } + return s +} + +func New(err error, title, rest string) error { + // Pass through DLL errors directly since they do not originate from HCS. 
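For callers elsewhere in the tree, the intended use of this package is roughly the following; the operation title and error code are illustrative:

err := hcserror.New(syscall.Errno(2), "hcsshim::OpenLayer", "while opening the layer") // 2 == ERROR_FILE_NOT_FOUND
logrus.Errorf("operation failed: %s (Win32 code 0x%x)", err, hcserror.Win32FromError(err))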
+ if _, ok := err.(*syscall.DLLError); ok { + return err + } + return &HcsError{title, rest, err} +} + +func Win32FromError(err error) uint32 { + if herr, ok := err.(*HcsError); ok { + return Win32FromError(herr.Err) + } + if code, ok := err.(syscall.Errno); ok { + return uint32(code) + } + return uint32(ERROR_GEN_FAILURE) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go new file mode 100644 index 00000000..04517520 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go @@ -0,0 +1,173 @@ +// +build windows + +package hcsoci + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/schemaversion" + "github.com/Microsoft/hcsshim/internal/uvm" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +// CreateOptions are the set of fields used to call CreateContainer(). +// Note: In the spec, the LayerFolders must be arranged in the same way in which +// moby configures them: layern, layern-1,...,layer2,layer1,scratch +// where layer1 is the base read-only layer, layern is the top-most read-only +// layer, and scratch is the RW layer. This is for historical reasons only. +type CreateOptions struct { + + // Common parameters + ID string // Identifier for the container + Owner string // Specifies the owner. Defaults to executable name. + Spec *specs.Spec // Definition of the container or utility VM being created + SchemaVersion *hcsschema.Version // Requested Schema Version. Defaults to v2 for RS5, v1 for RS1..RS4 + HostingSystem *uvm.UtilityVM // Utility or service VM in which the container is to be created. + NetworkNamespace string // Host network namespace to use (overrides anything in the spec) + + // This is an advanced debugging parameter. It allows for diagnosibility by leaving a containers + // resources allocated in case of a failure. Thus you would be able to use tools such as hcsdiag + // to look at the state of a utility VM to see what resources were allocated. Obviously the caller + // must a) not tear down the utility VM on failure (or pause in some way) and b) is responsible for + // performing the ReleaseResources() call themselves. + DoNotReleaseResourcesOnFailure bool +} + +// createOptionsInternal is the set of user-supplied create options, but includes internal +// fields for processing the request once user-supplied stuff has been validated. +type createOptionsInternal struct { + *CreateOptions + + actualSchemaVersion *hcsschema.Version // Calculated based on Windows build and optional caller-supplied override + actualID string // Identifier for the container + actualOwner string // Owner for the container + actualNetworkNamespace string +} + +// CreateContainer creates a container. It can cope with a wide variety of +// scenarios, including v1 HCS schema calls, as well as more complex v2 HCS schema +// calls. Note we always return the resources that have been allocated, even in the +// case of an error. This provides support for the debugging option not to +// release the resources on failure, so that the client can make the necessary +// call to release resources that have been allocated as part of calling this function. 
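A hedged sketch of how a client might call CreateContainer for a process-isolated WCOW container; the helper name and spec handling are illustrative, and note that on failure CreateContainer has already released the allocated resources unless DoNotReleaseResourcesOnFailure was set:

func createDemo(spec *specs.Spec) (*hcs.System, error) {
	opts := &CreateOptions{
		ID:   "demo",
		Spec: spec, // Windows.LayerFolders populated, Root.Path already mounted
	}
	system, resources, err := CreateContainer(opts)
	if err != nil {
		// Resources were released inside CreateContainer unless the debug flag was set.
		return nil, err
	}
	_ = resources // keep for the ReleaseResources call when tearing the container down
	return system, nil
}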
+func CreateContainer(createOptions *CreateOptions) (_ *hcs.System, _ *Resources, err error) { + logrus.Debugf("hcsshim::CreateContainer options: %+v", createOptions) + + coi := &createOptionsInternal{ + CreateOptions: createOptions, + actualID: createOptions.ID, + actualOwner: createOptions.Owner, + } + + // Defaults if omitted by caller. + if coi.actualID == "" { + coi.actualID = guid.New().String() + } + if coi.actualOwner == "" { + coi.actualOwner = filepath.Base(os.Args[0]) + } + + if coi.Spec == nil { + return nil, nil, fmt.Errorf("Spec must be supplied") + } + + if coi.HostingSystem != nil { + // By definition, a hosting system can only be supplied for a v2 Xenon. + coi.actualSchemaVersion = schemaversion.SchemaV21() + } else { + coi.actualSchemaVersion = schemaversion.DetermineSchemaVersion(coi.SchemaVersion) + logrus.Debugf("hcsshim::CreateContainer using schema %s", schemaversion.String(coi.actualSchemaVersion)) + } + + resources := &Resources{} + defer func() { + if err != nil { + if !coi.DoNotReleaseResourcesOnFailure { + ReleaseResources(resources, coi.HostingSystem, true) + } + } + }() + + if coi.HostingSystem != nil { + n := coi.HostingSystem.ContainerCounter() + if coi.Spec.Linux != nil { + resources.containerRootInUVM = "/run/gcs/c/" + strconv.FormatUint(n, 16) + } else { + resources.containerRootInUVM = `C:\c\` + strconv.FormatUint(n, 16) + } + } + + // Create a network namespace if necessary. + if coi.Spec.Windows != nil && + coi.Spec.Windows.Network != nil && + schemaversion.IsV21(coi.actualSchemaVersion) { + + if coi.NetworkNamespace != "" { + resources.netNS = coi.NetworkNamespace + } else { + err := createNetworkNamespace(coi, resources) + if err != nil { + return nil, resources, err + } + } + coi.actualNetworkNamespace = resources.netNS + if coi.HostingSystem != nil { + endpoints, err := getNamespaceEndpoints(coi.actualNetworkNamespace) + if err != nil { + return nil, resources, err + } + err = coi.HostingSystem.AddNetNS(coi.actualNetworkNamespace, endpoints) + if err != nil { + return nil, resources, err + } + resources.addedNetNSToVM = true + } + } + + var hcsDocument interface{} + logrus.Debugf("hcsshim::CreateContainer allocating resources") + if coi.Spec.Linux != nil { + if schemaversion.IsV10(coi.actualSchemaVersion) { + return nil, resources, errors.New("LCOW v1 not supported") + } + logrus.Debugf("hcsshim::CreateContainer allocateLinuxResources") + err = allocateLinuxResources(coi, resources) + if err != nil { + logrus.Debugf("failed to allocateLinuxResources %s", err) + return nil, resources, err + } + hcsDocument, err = createLinuxContainerDocument(coi, resources.containerRootInUVM) + if err != nil { + logrus.Debugf("failed createHCSContainerDocument %s", err) + return nil, resources, err + } + } else { + err = allocateWindowsResources(coi, resources) + if err != nil { + logrus.Debugf("failed to allocateWindowsResources %s", err) + return nil, resources, err + } + logrus.Debugf("hcsshim::CreateContainer creating container document") + hcsDocument, err = createWindowsContainerDocument(coi) + if err != nil { + logrus.Debugf("failed createHCSContainerDocument %s", err) + return nil, resources, err + } + } + + logrus.Debugf("hcsshim::CreateContainer creating compute system") + system, err := hcs.CreateComputeSystem(coi.actualID, hcsDocument) + if err != nil { + logrus.Debugf("failed to CreateComputeSystem %s", err) + return nil, resources, err + } + return system, resources, err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create_test.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create_test.go new file mode 100644 index 00000000..aa0b7330 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create_test.go @@ -0,0 +1,78 @@ +// +build windows,functional + +// +// These unit tests must run on a system setup to run both Argons and Xenons, +// have docker installed, and have the nanoserver (WCOW) and alpine (LCOW) +// base images installed. The nanoserver image MUST match the build of the +// host. +// +// We rely on docker as the tools to extract a container image aren't +// open source. We use it to find the location of the base image on disk. +// + +package hcsoci + +//import ( +// "bytes" +// "encoding/json" +// "io/ioutil" +// "os" +// "os/exec" +// "path/filepath" +// "strings" +// "testing" + +// "github.com/Microsoft/hcsshim/internal/schemaversion" +// _ "github.com/Microsoft/hcsshim/test/assets" +// specs "github.com/opencontainers/runtime-spec/specs-go" +// "github.com/sirupsen/logrus" +//) + +//func startUVM(t *testing.T, uvm *UtilityVM) { +// if err := uvm.Start(); err != nil { +// t.Fatalf("UVM %s Failed start: %s", uvm.Id, err) +// } +//} + +//// Helper to shoot a utility VM +//func terminateUtilityVM(t *testing.T, uvm *UtilityVM) { +// if err := uvm.Terminate(); err != nil { +// t.Fatalf("Failed terminate utility VM %s", err) +// } +//} + +//// TODO: Test UVMResourcesFromContainerSpec +//func TestUVMSizing(t *testing.T) { +// t.Skip("for now - not implemented at all") +//} + +//// TestID validates that the requested ID is retrieved +//func TestID(t *testing.T) { +// t.Skip("fornow") +// tempDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(tempDir) + +// layers := append(layersNanoserver, tempDir) +// mountPath, err := mountContainerLayers(layers, nil) +// if err != nil { +// t.Fatalf("failed to mount container storage: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) + +// c, err := CreateContainer(&CreateOptions{ +// Id: "gruntbuggly", +// SchemaVersion: schemaversion.SchemaV21(), +// Spec: &specs.Spec{ +// Windows: &specs.Windows{LayerFolders: layers}, +// Root: &specs.Root{Path: mountPath.(string)}, +// }, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// if c.ID() != "gruntbuggly" { +// t.Fatalf("id not set correctly: %s", c.ID()) +// } + +// c.Terminate() +//} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go new file mode 100644 index 00000000..71b1d037 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go @@ -0,0 +1,115 @@ +// +build windows + +package hcsoci + +import ( + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/schemaversion" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +func createLCOWSpec(coi *createOptionsInternal) (*specs.Spec, error) { + // Remarshal the spec to perform a deep copy. + j, err := json.Marshal(coi.Spec) + if err != nil { + return nil, err + } + spec := &specs.Spec{} + err = json.Unmarshal(j, spec) + if err != nil { + return nil, err + } + + // TODO + // Translate the mounts. The root has already been translated in + // allocateLinuxResources. + /* + for i := range spec.Mounts { + spec.Mounts[i].Source = "???" + spec.Mounts[i].Destination = "???" 
+ } + */ + + // Linux containers don't care about Windows aspects of the spec except the + // network namespace + spec.Windows = nil + if coi.Spec.Windows != nil && + coi.Spec.Windows.Network != nil && + coi.Spec.Windows.Network.NetworkNamespace != "" { + spec.Windows = &specs.Windows{ + Network: &specs.WindowsNetwork{ + NetworkNamespace: coi.Spec.Windows.Network.NetworkNamespace, + }, + } + } + + // Hooks are not supported (they should be run in the host) + spec.Hooks = nil + + // Clear unsupported features + if spec.Linux.Resources != nil { + spec.Linux.Resources.Devices = nil + spec.Linux.Resources.Memory = nil + spec.Linux.Resources.Pids = nil + spec.Linux.Resources.BlockIO = nil + spec.Linux.Resources.HugepageLimits = nil + spec.Linux.Resources.Network = nil + } + spec.Linux.Seccomp = nil + + // Clear any specified namespaces + var namespaces []specs.LinuxNamespace + for _, ns := range spec.Linux.Namespaces { + switch ns.Type { + case specs.NetworkNamespace: + default: + ns.Path = "" + namespaces = append(namespaces, ns) + } + } + spec.Linux.Namespaces = namespaces + + return spec, nil +} + +// This is identical to hcsschema.ComputeSystem but HostedSystem is an LCOW specific type - the schema docs only include WCOW. +type linuxComputeSystem struct { + Owner string `json:"Owner,omitempty"` + SchemaVersion *hcsschema.Version `json:"SchemaVersion,omitempty"` + HostingSystemId string `json:"HostingSystemId,omitempty"` + HostedSystem *linuxHostedSystem `json:"HostedSystem,omitempty"` + Container *hcsschema.Container `json:"Container,omitempty"` + VirtualMachine *hcsschema.VirtualMachine `json:"VirtualMachine,omitempty"` + ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` +} + +type linuxHostedSystem struct { + SchemaVersion *hcsschema.Version + OciBundlePath string + OciSpecification *specs.Spec +} + +func createLinuxContainerDocument(coi *createOptionsInternal, guestRoot string) (interface{}, error) { + spec, err := createLCOWSpec(coi) + if err != nil { + return nil, err + } + + logrus.Debugf("hcsshim::createLinuxContainerDoc: guestRoot:%s", guestRoot) + v2 := &linuxComputeSystem{ + Owner: coi.actualOwner, + SchemaVersion: schemaversion.SchemaV21(), + ShouldTerminateOnLastHandleClosed: true, + HostingSystemId: coi.HostingSystem.ID(), + HostedSystem: &linuxHostedSystem{ + SchemaVersion: schemaversion.SchemaV21(), + OciBundlePath: guestRoot, + OciSpecification: spec, + }, + } + + return v2, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go new file mode 100644 index 00000000..8fce2f3f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go @@ -0,0 +1,273 @@ +// +build windows + +package hcsoci + +import ( + "fmt" + "path/filepath" + "regexp" + "runtime" + "strings" + + "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/schemaversion" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvmfolder" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" +) + +// createWindowsContainerDocument creates a document suitable for calling HCS to create +// a container, both hosted and process isolated. It can create both v1 and v2 +// schema, WCOW only. The containers storage should have been mounted already. 
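Since the function below returns interface{}, callers receive either a v1 or a v2 document depending on the schema version in use; a type switch is the natural way to consume the result (sketch, with coi assumed in scope):

doc, err := createWindowsContainerDocument(coi)
if err != nil {
	return err
}
switch d := doc.(type) {
case *schema1.ContainerConfig: // v1 document (RS1..RS4 style)
	_ = d
case *hcsschema.ComputeSystem: // v2 document
	_ = d
}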
+func createWindowsContainerDocument(coi *createOptionsInternal) (interface{}, error) { + logrus.Debugf("hcsshim: CreateHCSContainerDocument") + // TODO: Make this safe if exported so no null pointer dereferences. + + if coi.Spec == nil { + return nil, fmt.Errorf("cannot create HCS container document - OCI spec is missing") + } + + if coi.Spec.Windows == nil { + return nil, fmt.Errorf("cannot create HCS container document - OCI spec Windows section is missing ") + } + + v1 := &schema1.ContainerConfig{ + SystemType: "Container", + Name: coi.actualID, + Owner: coi.actualOwner, + HvPartition: false, + IgnoreFlushesDuringBoot: coi.Spec.Windows.IgnoreFlushesDuringBoot, + } + + // IgnoreFlushesDuringBoot is a property of the SCSI attachment for the scratch. Set when it's hot-added to the utility VM + // ID is a property on the create call in V2 rather than part of the schema. + v2 := &hcsschema.ComputeSystem{ + Owner: coi.actualOwner, + SchemaVersion: schemaversion.SchemaV21(), + ShouldTerminateOnLastHandleClosed: true, + } + v2Container := &hcsschema.Container{Storage: &hcsschema.Storage{}} + + // TODO: Still want to revisit this. + if coi.Spec.Windows.LayerFolders == nil || len(coi.Spec.Windows.LayerFolders) < 2 { + return nil, fmt.Errorf("invalid spec - not enough layer folders supplied") + } + + if coi.Spec.Hostname != "" { + v1.HostName = coi.Spec.Hostname + v2Container.GuestOs = &hcsschema.GuestOs{HostName: coi.Spec.Hostname} + } + + if coi.Spec.Windows.Resources != nil { + if coi.Spec.Windows.Resources.CPU != nil { + if coi.Spec.Windows.Resources.CPU.Count != nil || + coi.Spec.Windows.Resources.CPU.Shares != nil || + coi.Spec.Windows.Resources.CPU.Maximum != nil { + v2Container.Processor = &hcsschema.Processor{} + } + if coi.Spec.Windows.Resources.CPU.Count != nil { + cpuCount := *coi.Spec.Windows.Resources.CPU.Count + hostCPUCount := uint64(runtime.NumCPU()) + if cpuCount > hostCPUCount { + logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) + cpuCount = hostCPUCount + } + v1.ProcessorCount = uint32(cpuCount) + v2Container.Processor.Count = int32(cpuCount) + } + if coi.Spec.Windows.Resources.CPU.Shares != nil { + v1.ProcessorWeight = uint64(*coi.Spec.Windows.Resources.CPU.Shares) + v2Container.Processor.Weight = int32(v1.ProcessorWeight) + } + if coi.Spec.Windows.Resources.CPU.Maximum != nil { + v1.ProcessorMaximum = int64(*coi.Spec.Windows.Resources.CPU.Maximum) + v2Container.Processor.Maximum = int32(v1.ProcessorMaximum) + } + } + if coi.Spec.Windows.Resources.Memory != nil { + if coi.Spec.Windows.Resources.Memory.Limit != nil { + v1.MemoryMaximumInMB = int64(*coi.Spec.Windows.Resources.Memory.Limit) / 1024 / 1024 + v2Container.Memory = &hcsschema.Memory{SizeInMB: int32(v1.MemoryMaximumInMB)} + + } + } + if coi.Spec.Windows.Resources.Storage != nil { + if coi.Spec.Windows.Resources.Storage.Bps != nil || coi.Spec.Windows.Resources.Storage.Iops != nil { + v2Container.Storage.QoS = &hcsschema.StorageQoS{} + } + if coi.Spec.Windows.Resources.Storage.Bps != nil { + v1.StorageBandwidthMaximum = *coi.Spec.Windows.Resources.Storage.Bps + v2Container.Storage.QoS.BandwidthMaximum = int32(v1.StorageBandwidthMaximum) + } + if coi.Spec.Windows.Resources.Storage.Iops != nil { + v1.StorageIOPSMaximum = *coi.Spec.Windows.Resources.Storage.Iops + v2Container.Storage.QoS.IopsMaximum = int32(*coi.Spec.Windows.Resources.Storage.Iops) + } + } + } + + // TODO V2 networking. Only partial at the moment. 
v2.Container.Networking.Namespace specifically + if coi.Spec.Windows.Network != nil { + v2Container.Networking = &hcsschema.Networking{} + + v1.EndpointList = coi.Spec.Windows.Network.EndpointList + v2Container.Networking.Namespace = coi.actualNetworkNamespace + + v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery + v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery + + if coi.Spec.Windows.Network.DNSSearchList != nil { + v1.DNSSearchList = strings.Join(coi.Spec.Windows.Network.DNSSearchList, ",") + v2Container.Networking.DnsSearchList = v1.DNSSearchList + } + + v1.NetworkSharedContainerName = coi.Spec.Windows.Network.NetworkSharedContainerName + v2Container.Networking.NetworkSharedContainerName = v1.NetworkSharedContainerName + } + + // // TODO V2 Credentials not in the schema yet. + if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok { + v1.Credentials = cs + } + + if coi.Spec.Root == nil { + return nil, fmt.Errorf("spec is invalid - root isn't populated") + } + + if coi.Spec.Root.Readonly { + return nil, fmt.Errorf(`invalid container spec - readonly is not supported for Windows containers`) + } + + // Strip off the top-most RW/scratch layer as that's passed in separately to HCS for v1 + v1.LayerFolderPath = coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1] + + if (schemaversion.IsV21(coi.actualSchemaVersion) && coi.HostingSystem == nil) || + (schemaversion.IsV10(coi.actualSchemaVersion) && coi.Spec.Windows.HyperV == nil) { + // Argon v1 or v2. + const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$` + if matched, err := regexp.MatchString(volumeGUIDRegex, coi.Spec.Root.Path); !matched || err != nil { + return nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, coi.Spec.Root.Path) + } + if coi.Spec.Root.Path[len(coi.Spec.Root.Path)-1] != '\\' { + coi.Spec.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat + } + v1.VolumePath = coi.Spec.Root.Path[:len(coi.Spec.Root.Path)-1] // Strip the trailing backslash. Required for v1. + v2Container.Storage.Path = coi.Spec.Root.Path + } else { + // A hosting system was supplied, implying v2 Xenon; OR a v1 Xenon. + if schemaversion.IsV10(coi.actualSchemaVersion) { + // V1 Xenon + v1.HvPartition = true + if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.HyperV == nil { // Be resilient to nil de-reference + return nil, fmt.Errorf(`invalid container spec - Spec.Windows.HyperV is nil`) + } + if coi.Spec.Windows.HyperV.UtilityVMPath != "" { + // Client-supplied utility VM path + v1.HvRuntime = &schema1.HvRuntime{ImagePath: coi.Spec.Windows.HyperV.UtilityVMPath} + } else { + // Client was lazy. Let's locate it from the layer folders instead. + uvmImagePath, err := uvmfolder.LocateUVMFolder(coi.Spec.Windows.LayerFolders) + if err != nil { + return nil, err + } + v1.HvRuntime = &schema1.HvRuntime{ImagePath: filepath.Join(uvmImagePath, `UtilityVM`)} + } + } else { + // Hosting system was supplied, so is v2 Xenon. 
+ v2Container.Storage.Path = coi.Spec.Root.Path + if coi.HostingSystem.OS() == "windows" { + layers, err := computeV2Layers(coi.HostingSystem, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]) + if err != nil { + return nil, err + } + v2Container.Storage.Layers = layers + } + } + } + + if coi.HostingSystem == nil { // Argon v1 or v2 + for _, layerPath := range coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1] { + layerID, err := wclayer.LayerID(layerPath) + if err != nil { + return nil, err + } + v1.Layers = append(v1.Layers, schema1.Layer{ID: layerID.String(), Path: layerPath}) + v2Container.Storage.Layers = append(v2Container.Storage.Layers, hcsschema.Layer{Id: layerID.String(), Path: layerPath}) + } + } + + // Add the mounts as mapped directories or mapped pipes + // TODO: Mapped pipes to add in v2 schema. + var ( + mdsv1 []schema1.MappedDir + mpsv1 []schema1.MappedPipe + mdsv2 []hcsschema.MappedDirectory + mpsv2 []hcsschema.MappedPipe + ) + for _, mount := range coi.Spec.Mounts { + const pipePrefix = `\\.\pipe\` + if mount.Type != "" { + return nil, fmt.Errorf("invalid container spec - Mount.Type '%s' must not be set", mount.Type) + } + if strings.HasPrefix(strings.ToLower(mount.Destination), pipePrefix) { + mpsv1 = append(mpsv1, schema1.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]}) + mpsv2 = append(mpsv2, hcsschema.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]}) + } else { + readOnly := false + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + readOnly = true + } + } + mdv1 := schema1.MappedDir{HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: readOnly} + mdv2 := hcsschema.MappedDirectory{ContainerPath: mount.Destination, ReadOnly: readOnly} + if coi.HostingSystem == nil { + mdv2.HostPath = mount.Source + } else { + uvmPath, err := coi.HostingSystem.GetVSMBUvmPath(mount.Source) + if err != nil { + if err == uvm.ErrNotAttached { + // It could also be a scsi mount. + uvmPath, err = coi.HostingSystem.GetScsiUvmPath(mount.Source) + if err != nil { + return nil, err + } + } else { + return nil, err + } + } + mdv2.HostPath = uvmPath + } + mdsv1 = append(mdsv1, mdv1) + mdsv2 = append(mdsv2, mdv2) + } + } + + v1.MappedDirectories = mdsv1 + v2Container.MappedDirectories = mdsv2 + if len(mpsv1) > 0 && osversion.Get().Build < osversion.RS3 { + return nil, fmt.Errorf("named pipe mounts are not supported on this version of Windows") + } + v1.MappedPipes = mpsv1 + v2Container.MappedPipes = mpsv2 + + // Put the v2Container object as a HostedSystem for a Xenon, or directly in the schema for an Argon. 
+ if coi.HostingSystem == nil { + v2.Container = v2Container + } else { + v2.HostingSystemId = coi.HostingSystem.ID() + v2.HostedSystem = &hcsschema.HostedSystem{ + SchemaVersion: schemaversion.SchemaV21(), + Container: v2Container, + } + } + + if schemaversion.IsV10(coi.actualSchemaVersion) { + return v1, nil + } + + return v2, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/layers.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/layers.go new file mode 100644 index 00000000..dabca160 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/layers.go @@ -0,0 +1,373 @@ +// +build windows + +package hcsoci + +import ( + "fmt" + "os" + "path" + "path/filepath" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/ospath" + "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type lcowLayerEntry struct { + hostPath string + uvmPath string + scsi bool +} + +const scratchPath = "scratch" + +// mountContainerLayers is a helper for clients to hide all the complexity of layer mounting +// Layer folder are in order: base, [rolayer1..rolayern,] scratch +// +// v1/v2: Argon WCOW: Returns the mount path on the host as a volume GUID. +// v1: Xenon WCOW: Done internally in HCS, so no point calling doing anything here. +// v2: Xenon WCOW: Returns a CombinedLayersV2 structure where ContainerRootPath is a folder +// inside the utility VM which is a GUID mapping of the scratch folder. Each +// of the layers are the VSMB locations where the read-only layers are mounted. +// +func MountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM) (interface{}, error) { + logrus.Debugln("hcsshim::mountContainerLayers", layerFolders) + + if uvm == nil { + if len(layerFolders) < 2 { + return nil, fmt.Errorf("need at least two layers - base and scratch") + } + path := layerFolders[len(layerFolders)-1] + rest := layerFolders[:len(layerFolders)-1] + logrus.Debugln("hcsshim::mountContainerLayers ActivateLayer", path) + if err := wclayer.ActivateLayer(path); err != nil { + return nil, err + } + logrus.Debugln("hcsshim::mountContainerLayers Preparelayer", path, rest) + if err := wclayer.PrepareLayer(path, rest); err != nil { + if err2 := wclayer.DeactivateLayer(path); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", path, err) + } + return nil, err + } + + mountPath, err := wclayer.GetLayerMountPath(path) + if err != nil { + if err := wclayer.UnprepareLayer(path); err != nil { + logrus.Warnf("Failed to Unprepare %s: %s", path, err) + } + if err2 := wclayer.DeactivateLayer(path); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", path, err) + } + return nil, err + } + return mountPath, nil + } + + // V2 UVM + logrus.Debugf("hcsshim::mountContainerLayers Is a %s V2 UVM", uvm.OS()) + + // Add each read-only layers. For Windows, this is a VSMB share with the ResourceUri ending in + // a GUID based on the folder path. For Linux, this is a VPMEM device, except where is over the + // max size supported, where we put it on SCSI instead. + // + // Each layer is ref-counted so that multiple containers in the same utility VM can share them. 
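Stepping back, the Argon branch above (uvm == nil) is typically consumed by a client like this; error handling and variable names are illustrative:

mountPath, err := MountContainerLayers(layerFolders, "", nil)
if err != nil {
	return err
}
rootPath, ok := mountPath.(string) // a volume GUID path such as `\\?\Volume{...}\`
if !ok {
	return fmt.Errorf("unexpected mount result of type %T", mountPath)
}
// rootPath then becomes the container's Root.Path in the OCI spec.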
+ var wcowLayersAdded []string + var lcowlayersAdded []lcowLayerEntry + attachedSCSIHostPath := "" + + for _, layerPath := range layerFolders[:len(layerFolders)-1] { + var err error + if uvm.OS() == "windows" { + options := &hcsschema.VirtualSmbShareOptions{ + ReadOnly: true, + PseudoOplocks: true, + TakeBackupPrivilege: true, + CacheIo: true, + ShareRead: true, + } + err = uvm.AddVSMB(layerPath, "", options) + if err == nil { + wcowLayersAdded = append(wcowLayersAdded, layerPath) + } + } else { + uvmPath := "" + hostPath := filepath.Join(layerPath, "layer.vhd") + + var fi os.FileInfo + fi, err = os.Stat(hostPath) + if err == nil && uint64(fi.Size()) > uvm.PMemMaxSizeBytes() { + // Too big for PMEM. Add on SCSI instead (at /tmp/S/). + var ( + controller int + lun int32 + ) + controller, lun, err = uvm.AddSCSILayer(hostPath) + if err == nil { + lcowlayersAdded = append(lcowlayersAdded, + lcowLayerEntry{ + hostPath: hostPath, + uvmPath: fmt.Sprintf("/tmp/S%d/%d", controller, lun), + scsi: true, + }) + } + } else { + _, uvmPath, err = uvm.AddVPMEM(hostPath, true) // UVM path is calculated. Will be /tmp/vN/ + if err == nil { + lcowlayersAdded = append(lcowlayersAdded, + lcowLayerEntry{ + hostPath: hostPath, + uvmPath: uvmPath, + }) + } + } + } + if err != nil { + cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath) + return nil, err + } + } + + // Add the scratch at an unused SCSI location. The container path inside the + // utility VM will be C:\. + hostPath := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx") + + // BUGBUG Rename guestRoot better. + containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath) + _, _, err := uvm.AddSCSI(hostPath, containerScratchPathInUVM, false) + if err != nil { + cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath) + return nil, err + } + attachedSCSIHostPath = hostPath + + if uvm.OS() == "windows" { + // Load the filter at the C:\s location calculated above. We pass into this request each of the + // read-only layer folders. + layers, err := computeV2Layers(uvm, wcowLayersAdded) + if err != nil { + cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath) + return nil, err + } + guestRequest := guestrequest.CombinedLayers{ + ContainerRootPath: containerScratchPathInUVM, + Layers: layers, + } + combinedLayersModification := &hcsschema.ModifySettingRequest{ + GuestRequest: guestrequest.GuestRequest{ + Settings: guestRequest, + ResourceType: guestrequest.ResourceTypeCombinedLayers, + RequestType: requesttype.Add, + }, + } + if err := uvm.Modify(combinedLayersModification); err != nil { + cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath) + return nil, err + } + logrus.Debugln("hcsshim::mountContainerLayers Succeeded") + return guestRequest, nil + } + + // This is the LCOW layout inside the utilityVM. NNN is the container "number" + // which increments for each container created in a utility VM. 
+ // + // /run/gcs/c/NNN/config.json + // /run/gcs/c/NNN/rootfs + // /run/gcs/c/NNN/scratch/upper + // /run/gcs/c/NNN/scratch/work + // + // /dev/sda on /tmp/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl) + // /dev/pmem0 on /tmp/v0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl) + // /dev/sdb on /run/gcs/c/NNN/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl) + // overlay on /run/gcs/c/NNN/rootfs type overlay (rw,relatime,lowerdir=/tmp/v0,upperdir=/run/gcs/c/NNN/scratch/upper,workdir=/run/gcs/c/NNN/scratch/work) + // + // Where /dev/sda is the scratch for utility VM itself + // /dev/pmemX are read-only layers for containers + // /dev/sd(b...) are scratch spaces for each container + + layers := []hcsschema.Layer{} + for _, l := range lcowlayersAdded { + layers = append(layers, hcsschema.Layer{Path: l.uvmPath}) + } + guestRequest := guestrequest.CombinedLayers{ + ContainerRootPath: path.Join(guestRoot, rootfsPath), + Layers: layers, + ScratchPath: containerScratchPathInUVM, + } + combinedLayersModification := &hcsschema.ModifySettingRequest{ + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeCombinedLayers, + RequestType: requesttype.Add, + Settings: guestRequest, + }, + } + if err := uvm.Modify(combinedLayersModification); err != nil { + cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath) + return nil, err + } + logrus.Debugln("hcsshim::mountContainerLayers Succeeded") + return guestRequest, nil + +} + +// UnmountOperation is used when calling Unmount() to determine what type of unmount is +// required. In V1 schema, this must be unmountOperationAll. In V2, client can +// be more optimal and only unmount what they need which can be a minor performance +// improvement (eg if you know only one container is running in a utility VM, and +// the UVM is about to be torn down, there's no need to unmount the VSMB shares, +// just SCSI to have a consistent file system). +type UnmountOperation uint + +const ( + UnmountOperationSCSI UnmountOperation = 0x01 + UnmountOperationVSMB = 0x02 + UnmountOperationVPMEM = 0x04 + UnmountOperationAll = UnmountOperationSCSI | UnmountOperationVSMB | UnmountOperationVPMEM +) + +// UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting +func UnmountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM, op UnmountOperation) error { + logrus.Debugln("hcsshim::unmountContainerLayers", layerFolders) + if uvm == nil { + // Must be an argon - folders are mounted on the host + if op != UnmountOperationAll { + return fmt.Errorf("only operation supported for host-mounted folders is unmountOperationAll") + } + if len(layerFolders) < 1 { + return fmt.Errorf("need at least one layer for Unmount") + } + path := layerFolders[len(layerFolders)-1] + logrus.Debugln("hcsshim::Unmount UnprepareLayer", path) + if err := wclayer.UnprepareLayer(path); err != nil { + return err + } + // TODO Should we try this anyway? + logrus.Debugln("hcsshim::unmountContainerLayers DeactivateLayer", path) + return wclayer.DeactivateLayer(path) + } + + // V2 Xenon + + // Base+Scratch as a minimum. 
This is different to v1 which only requires the scratch + if len(layerFolders) < 2 { + return fmt.Errorf("at least two layers are required for unmount") + } + + var retError error + + // Unload the storage filter followed by the SCSI scratch + if (op & UnmountOperationSCSI) == UnmountOperationSCSI { + containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath) + logrus.Debugf("hcsshim::unmountContainerLayers CombinedLayers %s", containerScratchPathInUVM) + combinedLayersModification := &hcsschema.ModifySettingRequest{ + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeCombinedLayers, + RequestType: requesttype.Remove, + Settings: guestrequest.CombinedLayers{ContainerRootPath: containerScratchPathInUVM}, + }, + } + if err := uvm.Modify(combinedLayersModification); err != nil { + logrus.Errorf(err.Error()) + } + + // Hot remove the scratch from the SCSI controller + hostScratchFile := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx") + logrus.Debugf("hcsshim::unmountContainerLayers SCSI %s %s", containerScratchPathInUVM, hostScratchFile) + if err := uvm.RemoveSCSI(hostScratchFile); err != nil { + e := fmt.Errorf("failed to remove SCSI %s: %s", hostScratchFile, err) + logrus.Debugln(e) + if retError == nil { + retError = e + } else { + retError = errors.Wrapf(retError, e.Error()) + } + } + } + + // Remove each of the read-only layers from VSMB. These's are ref-counted and + // only removed once the count drops to zero. This allows multiple containers + // to share layers. + if uvm.OS() == "windows" && len(layerFolders) > 1 && (op&UnmountOperationVSMB) == UnmountOperationVSMB { + for _, layerPath := range layerFolders[:len(layerFolders)-1] { + if e := uvm.RemoveVSMB(layerPath); e != nil { + logrus.Debugln(e) + if retError == nil { + retError = e + } else { + retError = errors.Wrapf(retError, e.Error()) + } + } + } + } + + // Remove each of the read-only layers from VPMEM (or SCSI). These's are ref-counted + // and only removed once the count drops to zero. This allows multiple containers to + // share layers. Note that SCSI is used on large layers. 
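// Illustrative sketch (not part of the vendored file): how a caller might pick
// the UnmountOperation passed to UnmountContainerLayers above. The layerFolders,
// guestRoot and utilityVM values are placeholders for whatever was used when the
// layers were mounted.
//
//	// One container is going away but the utility VM (and its ref-counted
//	// VSMB/VPMEM layers) lives on: only detach this container's SCSI scratch.
//	if err := UnmountContainerLayers(layerFolders, guestRoot, utilityVM, UnmountOperationSCSI); err != nil {
//		logrus.Warnf("scratch unmount failed: %s", err)
//	}
//
//	// The utility VM itself is being torn down: release everything.
//	if err := UnmountContainerLayers(layerFolders, guestRoot, utilityVM, UnmountOperationAll); err != nil {
//		logrus.Warnf("full unmount failed: %s", err)
//	}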
+ if uvm.OS() == "linux" && len(layerFolders) > 1 && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM { + for _, layerPath := range layerFolders[:len(layerFolders)-1] { + hostPath := filepath.Join(layerPath, "layer.vhd") + if fi, err := os.Stat(hostPath); err != nil { + var e error + if uint64(fi.Size()) > uvm.PMemMaxSizeBytes() { + e = uvm.RemoveSCSI(hostPath) + } else { + e = uvm.RemoveVPMEM(hostPath) + } + if e != nil { + logrus.Debugln(e) + if retError == nil { + retError = e + } else { + retError = errors.Wrapf(retError, e.Error()) + } + } + } + } + } + + // TODO (possibly) Consider deleting the container directory in the utility VM + + return retError +} + +func cleanupOnMountFailure(uvm *uvm.UtilityVM, wcowLayers []string, lcowLayers []lcowLayerEntry, scratchHostPath string) { + for _, wl := range wcowLayers { + if err := uvm.RemoveVSMB(wl); err != nil { + logrus.Warnf("Possibly leaked vsmbshare on error removal path: %s", err) + } + } + for _, ll := range lcowLayers { + if ll.scsi { + if err := uvm.RemoveSCSI(ll.hostPath); err != nil { + logrus.Warnf("Possibly leaked SCSI on error removal path: %s", err) + } + } else if err := uvm.RemoveVPMEM(ll.hostPath); err != nil { + logrus.Warnf("Possibly leaked vpmemdevice on error removal path: %s", err) + } + } + if scratchHostPath != "" { + if err := uvm.RemoveSCSI(scratchHostPath); err != nil { + logrus.Warnf("Possibly leaked SCSI disk on error removal path: %s", err) + } + } +} + +func computeV2Layers(vm *uvm.UtilityVM, paths []string) (layers []hcsschema.Layer, err error) { + for _, path := range paths { + uvmPath, err := vm.GetVSMBUvmPath(path) + if err != nil { + return nil, err + } + layerID, err := wclayer.LayerID(path) + if err != nil { + return nil, err + } + layers = append(layers, hcsschema.Layer{Id: layerID.String(), Path: uvmPath}) + } + return layers, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go new file mode 100644 index 00000000..f880d8b3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go @@ -0,0 +1,41 @@ +package hcsoci + +import ( + "github.com/Microsoft/hcsshim/internal/hns" + "github.com/sirupsen/logrus" +) + +func createNetworkNamespace(coi *createOptionsInternal, resources *Resources) error { + netID, err := hns.CreateNamespace() + if err != nil { + return err + } + logrus.Infof("created network namespace %s for %s", netID, coi.ID) + resources.netNS = netID + resources.createdNetNS = true + for _, endpointID := range coi.Spec.Windows.Network.EndpointList { + err = hns.AddNamespaceEndpoint(netID, endpointID) + if err != nil { + return err + } + logrus.Infof("added network endpoint %s to namespace %s", endpointID, netID) + resources.networkEndpoints = append(resources.networkEndpoints, endpointID) + } + return nil +} + +func getNamespaceEndpoints(netNS string) ([]*hns.HNSEndpoint, error) { + ids, err := hns.GetNamespaceEndpoints(netNS) + if err != nil { + return nil, err + } + var endpoints []*hns.HNSEndpoint + for _, id := range ids { + endpoint, err := hns.GetHNSEndpointByID(id) + if err != nil { + return nil, err + } + endpoints = append(endpoints, endpoint) + } + return endpoints, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources.go new file mode 100644 index 00000000..ffbe8555 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources.go @@ -0,0 +1,127 @@ 
+package hcsoci
+
+import (
+	"os"
+
+	"github.com/Microsoft/hcsshim/internal/hns"
+	"github.com/Microsoft/hcsshim/internal/uvm"
+	"github.com/sirupsen/logrus"
+)
+
+// NetNS returns the network namespace for the container
+func (r *Resources) NetNS() string {
+	return r.netNS
+}
+
+// Resources is the structure returned as part of creating a container. It holds
+// nothing useful to clients, hence everything is lowercased. A client would use
+// it in a call to ReleaseResources to ensure everything is cleaned up when a
+// container exits.
+type Resources struct {
+	// containerRootInUVM is the base path in a utility VM where elements relating
+	// to a container are exposed. For example, the mounted filesystem; the runtime
+	// spec (in the case of LCOW); overlay and scratch (in the case of LCOW).
+	//
+	// For WCOW, this will be under C:\c\N, and for LCOW this will
+	// be under /run/gcs/c/N. N is an atomic counter for each container created
+	// in that utility VM. For LCOW this is also the "OCI Bundle Path".
+	containerRootInUVM string
+
+	// layers is an array of the layer folder paths which have been mounted either on
+	// the host in the case of a WCOW Argon, or in a utility VM for WCOW Xenon and LCOW.
+	layers []string
+
+	// vsmbMounts is an array of the host-paths mounted into a utility VM to support
+	// (bind-)mounts into a WCOW v2 Xenon.
+	vsmbMounts []string
+
+	// plan9Mounts is an array of all the host paths which have been added to
+	// an LCOW utility VM
+	plan9Mounts []string
+
+	// netNS is the network namespace
+	netNS string
+
+	// networkEndpoints is the list of network endpoints used by the container
+	networkEndpoints []string
+
+	// createdNetNS indicates if the network namespace has been created
+	createdNetNS bool
+
+	// addedNetNSToVM indicates if the network namespace has been added to the container's utility VM
+	addedNetNSToVM bool
+
+	// scsiMounts is an array of the host-paths mounted into a utility VM to
+	// support scsi device passthrough.
+	scsiMounts []string
+}
+
+// TODO: Method on the resources?
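// Illustrative sketch (not part of the vendored file): typical cleanup with the
// ReleaseResources helper defined below. The resources and hostingUVM variables
// are placeholders for the *Resources returned when the container was created
// and its hosting utility VM (nil for an Argon).
//
//	// Best-effort teardown when the container exits; all=true also releases the
//	// VSMB, Plan9 and SCSI resources that were added to the utility VM.
//	if err := ReleaseResources(resources, hostingUVM, true); err != nil {
//		logrus.Warnf("failed to release container resources: %s", err)
//	}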
+func ReleaseResources(r *Resources, vm *uvm.UtilityVM, all bool) error { + if vm != nil && r.addedNetNSToVM { + err := vm.RemoveNetNS(r.netNS) + if err != nil { + logrus.Warn(err) + } + r.addedNetNSToVM = false + } + + if r.createdNetNS { + for len(r.networkEndpoints) != 0 { + endpoint := r.networkEndpoints[len(r.networkEndpoints)-1] + err := hns.RemoveNamespaceEndpoint(r.netNS, endpoint) + if err != nil { + if !os.IsNotExist(err) { + return err + } + logrus.Warnf("removing endpoint %s from namespace %s: does not exist", endpoint, r.NetNS()) + } + r.networkEndpoints = r.networkEndpoints[:len(r.networkEndpoints)-1] + } + r.networkEndpoints = nil + err := hns.RemoveNamespace(r.netNS) + if err != nil && !os.IsNotExist(err) { + return err + } + r.createdNetNS = false + } + + if len(r.layers) != 0 { + op := UnmountOperationSCSI + if vm == nil || all { + op = UnmountOperationAll + } + err := UnmountContainerLayers(r.layers, r.containerRootInUVM, vm, op) + if err != nil { + return err + } + r.layers = nil + } + + if all { + for len(r.vsmbMounts) != 0 { + mount := r.vsmbMounts[len(r.vsmbMounts)-1] + if err := vm.RemoveVSMB(mount); err != nil { + return err + } + r.vsmbMounts = r.vsmbMounts[:len(r.vsmbMounts)-1] + } + + for len(r.plan9Mounts) != 0 { + mount := r.plan9Mounts[len(r.plan9Mounts)-1] + if err := vm.RemovePlan9(mount); err != nil { + return err + } + r.plan9Mounts = r.plan9Mounts[:len(r.plan9Mounts)-1] + } + + for _, path := range r.scsiMounts { + if err := vm.RemoveSCSI(path); err != nil { + return err + } + r.scsiMounts = nil + } + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go new file mode 100644 index 00000000..2de308d0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go @@ -0,0 +1,104 @@ +// +build windows + +package hcsoci + +// Contains functions relating to a LCOW container, as opposed to a utility VM + +import ( + "fmt" + "path" + "strconv" + "strings" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +const rootfsPath = "rootfs" +const mountPathPrefix = "m" + +func allocateLinuxResources(coi *createOptionsInternal, resources *Resources) error { + if coi.Spec.Root == nil { + coi.Spec.Root = &specs.Root{} + } + if coi.Spec.Root.Path == "" { + logrus.Debugln("hcsshim::allocateLinuxResources mounting storage") + mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem) + if err != nil { + return fmt.Errorf("failed to mount container storage: %s", err) + } + if coi.HostingSystem == nil { + coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2 + } else { + coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon LCOW + } + resources.layers = coi.Spec.Windows.LayerFolders + } else { + // This is the "Plan 9" root filesystem. + // TODO: We need a test for this. Ask @jstarks how you can even lay this out on Windows. 
+ hostPath := coi.Spec.Root.Path + uvmPathForContainersFileSystem := path.Join(resources.containerRootInUVM, rootfsPath) + err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly) + if err != nil { + return fmt.Errorf("adding plan9 root: %s", err) + } + coi.Spec.Root.Path = uvmPathForContainersFileSystem + resources.plan9Mounts = append(resources.plan9Mounts, hostPath) + } + + for i, mount := range coi.Spec.Mounts { + switch mount.Type { + case "bind": + case "physical-disk": + case "virtual-disk": + default: + // Unknown mount type + continue + } + if mount.Destination == "" || mount.Source == "" { + return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount) + } + + if coi.HostingSystem != nil { + hostPath := mount.Source + uvmPathForShare := path.Join(resources.containerRootInUVM, mountPathPrefix+strconv.Itoa(i)) + + readOnly := false + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + readOnly = true + break + } + } + + if mount.Type == "physical-disk" { + logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount %+v", mount) + _, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(hostPath, uvmPathForShare, readOnly) + if err != nil { + return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err) + } + resources.scsiMounts = append(resources.scsiMounts, hostPath) + coi.Spec.Mounts[i].Type = "none" + } else if mount.Type == "virtual-disk" { + logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI virtual disk for OCI mount %+v", mount) + _, _, err := coi.HostingSystem.AddSCSI(hostPath, uvmPathForShare, readOnly) + if err != nil { + return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err) + } + resources.scsiMounts = append(resources.scsiMounts, hostPath) + coi.Spec.Mounts[i].Type = "none" + } else { + logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount %+v", mount) + err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForShare, readOnly) + if err != nil { + return fmt.Errorf("adding plan9 mount %+v: %s", mount, err) + } + resources.plan9Mounts = append(resources.plan9Mounts, hostPath) + } + coi.Spec.Mounts[i].Source = uvmPathForShare + } + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go new file mode 100644 index 00000000..046fdfae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go @@ -0,0 +1,127 @@ +// +build windows + +package hcsoci + +// Contains functions relating to a WCOW container, as opposed to a utility VM + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/schemaversion" + "github.com/Microsoft/hcsshim/internal/wclayer" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +func allocateWindowsResources(coi *createOptionsInternal, resources *Resources) error { + if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.LayerFolders == nil { + return fmt.Errorf("field 'Spec.Windows.Layerfolders' is not populated") + } + + scratchFolder := coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1] + logrus.Debugf("hcsshim::allocateWindowsResources scratch folder: %s", scratchFolder) + + // TODO: Remove this code for 
auto-creation. Make the caller responsible. + // Create the directory for the RW scratch layer if it doesn't exist + if _, err := os.Stat(scratchFolder); os.IsNotExist(err) { + logrus.Debugf("hcsshim::allocateWindowsResources container scratch folder does not exist so creating: %s ", scratchFolder) + if err := os.MkdirAll(scratchFolder, 0777); err != nil { + return fmt.Errorf("failed to auto-create container scratch folder %s: %s", scratchFolder, err) + } + } + + // Create sandbox.vhdx if it doesn't exist in the scratch folder. It's called sandbox.vhdx + // rather than scratch.vhdx as in the v1 schema, it's hard-coded in HCS. + if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) { + logrus.Debugf("hcsshim::allocateWindowsResources container sandbox.vhdx does not exist so creating in %s ", scratchFolder) + if err := wclayer.CreateScratchLayer(scratchFolder, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]); err != nil { + return fmt.Errorf("failed to CreateSandboxLayer %s", err) + } + } + + if coi.Spec.Root == nil { + coi.Spec.Root = &specs.Root{} + } + + if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) { + logrus.Debugln("hcsshim::allocateWindowsResources mounting storage") + mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem) + if err != nil { + return fmt.Errorf("failed to mount container storage: %s", err) + } + if coi.HostingSystem == nil { + coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2 + } else { + coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon WCOW + } + resources.layers = coi.Spec.Windows.LayerFolders + } + + // Validate each of the mounts. If this is a V2 Xenon, we have to add them as + // VSMB shares to the utility VM. For V1 Xenon and Argons, there's nothing for + // us to do as it's done by HCS. 
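// Illustrative sketch (not part of the vendored file): the shape of the OCI spec
// mounts the loop below consumes. The paths are made-up examples. Type "" is a
// plain directory mount surfaced over VSMB for a V2 Xenon, while "virtual-disk"
// and "physical-disk" sources are hot-added over SCSI; an "ro" option marks the
// mount read-only.
//
//	spec.Mounts = []specs.Mount{
//		{Source: `C:\shared\config`, Destination: `C:\config`, Options: []string{"ro"}},
//		{Source: `C:\disks\data.vhdx`, Destination: `C:\data`, Type: "virtual-disk"},
//	}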
+	for i, mount := range coi.Spec.Mounts {
+		if mount.Destination == "" || mount.Source == "" {
+			return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
+		}
+		switch mount.Type {
+		case "":
+		case "physical-disk":
+		case "virtual-disk":
+		default:
+			return fmt.Errorf("invalid OCI spec - Type '%s' not supported", mount.Type)
+		}
+
+		if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) {
+			uvmPath := fmt.Sprintf("C:\\%s\\%d", coi.actualID, i)
+
+			readOnly := false
+			for _, o := range mount.Options {
+				if strings.ToLower(o) == "ro" {
+					readOnly = true
+					break
+				}
+			}
+			if mount.Type == "physical-disk" {
+				logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
+				_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(mount.Source, uvmPath, readOnly)
+				if err != nil {
+					return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
+				}
+				coi.Spec.Mounts[i].Type = ""
+				resources.scsiMounts = append(resources.scsiMounts, mount.Source)
+			} else if mount.Type == "virtual-disk" {
+				logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
+				_, _, err := coi.HostingSystem.AddSCSI(mount.Source, uvmPath, readOnly)
+				if err != nil {
+					return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
+				}
+				coi.Spec.Mounts[i].Type = ""
+				resources.scsiMounts = append(resources.scsiMounts, mount.Source)
+			} else {
+				logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding VSMB share for OCI mount %+v", mount)
+				options := &hcsschema.VirtualSmbShareOptions{}
+				if readOnly {
+					options.ReadOnly = true
+					options.CacheIo = true
+					options.ShareRead = true
+					options.ForceLevelIIOplocks = true
+				}
+
+				err := coi.HostingSystem.AddVSMB(mount.Source, "", options)
+				if err != nil {
+					return fmt.Errorf("failed to add VSMB share to utility VM for mount %+v: %s", mount, err)
+				}
+				resources.vsmbMounts = append(resources.vsmbMounts, mount.Source)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_argon_test.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_argon_test.go
new file mode 100644
index 00000000..ca22a56c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_argon_test.go
@@ -0,0 +1,260 @@
+// +build windows,functional
+
+package hcsoci
+
+//import (
+//	"os"
+//	"path/filepath"
+//	"testing"
+
+//	"github.com/Microsoft/hcsshim/internal/schemaversion"
+//	specs "github.com/opencontainers/runtime-spec/specs-go"
+//)
+
+//// --------------------------------
+//// W C O W A R G O N V 1
+//// --------------------------------
+
+//// A v1 Argon with a single base layer. It also validates hostname functionality is propagated.
+//func TestV1Argon(t *testing.T) { +// t.Skip("fornow") +// tempDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(tempDir) + +// layers := append(layersNanoserver, tempDir) +// mountPath, err := mountContainerLayers(layers, nil) +// if err != nil { +// t.Fatalf("failed to mount container storage: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) + +// c, err := CreateContainer(&CreateOptions{ +// SchemaVersion: schemaversion.SchemaV10(), +// Id: "TestV1Argon", +// Owner: "unit-test", +// Spec: &specs.Spec{ +// Hostname: "goofy", +// Windows: &specs.Windows{LayerFolders: layers}, +// Root: &specs.Root{Path: mountPath.(string)}, +// }, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// startContainer(t, c) +// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello") +// runCommand(t, c, "cmd /s /c hostname", `c:\`, "goofy") +// stopContainer(t, c) +// c.Terminate() +//} + +//// A v1 Argon with a single base layer which uses the auto-mount capability +//func TestV1ArgonAutoMount(t *testing.T) { +// t.Skip("fornow") +// tempDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(tempDir) + +// layers := append(layersBusybox, tempDir) +// c, err := CreateContainer(&CreateOptions{ +// Id: "TestV1ArgonAutoMount", +// SchemaVersion: schemaversion.SchemaV10(), +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}}, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) +// startContainer(t, c) +// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello") +// stopContainer(t, c) +// c.Terminate() +//} + +//// A v1 Argon with multiple layers which uses the auto-mount capability +//func TestV1ArgonMultipleBaseLayersAutoMount(t *testing.T) { +// t.Skip("fornow") + +// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory +// containerScratchDir := createTempDir(t) +// os.RemoveAll(containerScratchDir) +// defer os.RemoveAll(containerScratchDir) // As auto-created + +// layers := append(layersBusybox, containerScratchDir) +// c, err := CreateContainer(&CreateOptions{ +// Id: "TestV1ArgonMultipleBaseLayersAutoMount", +// SchemaVersion: schemaversion.SchemaV10(), +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}}, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) +// startContainer(t, c) +// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello") +// stopContainer(t, c) +// c.Terminate() +//} + +//// A v1 Argon with a single mapped directory. +//func TestV1ArgonSingleMappedDirectory(t *testing.T) { +// t.Skip("fornow") +// tempDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(tempDir) + +// layers := append(layersNanoserver, tempDir) + +// // Create a temp folder containing foo.txt which will be used for the bind-mount test. 
+// source := createTempDir(t) +// defer os.RemoveAll(source) +// mount := specs.Mount{ +// Source: source, +// Destination: `c:\foo`, +// } +// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755) +// f.Close() + +// c, err := CreateContainer(&CreateOptions{ +// SchemaVersion: schemaversion.SchemaV10(), +// Spec: &specs.Spec{ +// Windows: &specs.Windows{LayerFolders: layers}, +// Mounts: []specs.Mount{mount}, +// }, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) + +// startContainer(t, c) +// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt") +// stopContainer(t, c) +// c.Terminate() +//} + +//// -------------------------------- +//// W C O W A R G O N V 2 +//// -------------------------------- + +//// A v2 Argon with a single base layer. It also validates hostname functionality is propagated. +//// It also uses an auto-generated ID. +//func TestV2Argon(t *testing.T) { +// t.Skip("fornow") +// tempDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(tempDir) + +// layers := append(layersNanoserver, tempDir) +// mountPath, err := mountContainerLayers(layers, nil) +// if err != nil { +// t.Fatalf("failed to mount container storage: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) + +// c, err := CreateContainer(&CreateOptions{ +// SchemaVersion: schemaversion.SchemaV21(), +// Spec: &specs.Spec{ +// Hostname: "mickey", +// Windows: &specs.Windows{LayerFolders: layers}, +// Root: &specs.Root{Path: mountPath.(string)}, +// }, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// startContainer(t, c) +// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello") +// runCommand(t, c, "cmd /s /c hostname", `c:\`, "mickey") +// stopContainer(t, c) +// c.Terminate() +//} + +//// A v2 Argon with multiple layers +//func TestV2ArgonMultipleBaseLayers(t *testing.T) { +// t.Skip("fornow") +// tempDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(tempDir) + +// layers := append(layersBusybox, tempDir) +// mountPath, err := mountContainerLayers(layers, nil) +// if err != nil { +// t.Fatalf("failed to mount container storage: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) + +// c, err := CreateContainer(&CreateOptions{ +// SchemaVersion: schemaversion.SchemaV21(), +// Id: "TestV2ArgonMultipleBaseLayers", +// Spec: &specs.Spec{ +// Windows: &specs.Windows{LayerFolders: layers}, +// Root: &specs.Root{Path: mountPath.(string)}, +// }, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// startContainer(t, c) +// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello") +// stopContainer(t, c) +// c.Terminate() +//} + +//// A v2 Argon with multiple layers which uses the auto-mount capability and auto-create +//func TestV2ArgonAutoMountMultipleBaseLayers(t *testing.T) { +// t.Skip("fornow") + +// // This is the important bit for this test. It's deleted here. 
We call the helper only to allocate a temporary directory +// containerScratchDir := createTempDir(t) +// os.RemoveAll(containerScratchDir) +// defer os.RemoveAll(containerScratchDir) // As auto-created + +// layers := append(layersBusybox, containerScratchDir) + +// c, err := CreateContainer(&CreateOptions{ +// SchemaVersion: schemaversion.SchemaV21(), +// Id: "TestV2ArgonAutoMountMultipleBaseLayers", +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}}, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) +// startContainer(t, c) +// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello") +// stopContainer(t, c) +// c.Terminate() +//} + +//// A v2 Argon with a single mapped directory. +//func TestV2ArgonSingleMappedDirectory(t *testing.T) { +// t.Skip("fornow") +// tempDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(tempDir) + +// layers := append(layersNanoserver, tempDir) + +// // Create a temp folder containing foo.txt which will be used for the bind-mount test. +// source := createTempDir(t) +// defer os.RemoveAll(source) +// mount := specs.Mount{ +// Source: source, +// Destination: `c:\foo`, +// } +// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755) +// f.Close() + +// c, err := CreateContainer(&CreateOptions{ +// SchemaVersion: schemaversion.SchemaV21(), +// Spec: &specs.Spec{ +// Windows: &specs.Windows{LayerFolders: layers}, +// Mounts: []specs.Mount{mount}, +// }, +// }) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// defer unmountContainerLayers(layers, nil, unmountOperationAll) + +// startContainer(t, c) +// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt") +// stopContainer(t, c) +// c.Terminate() +//} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_xenon_test.go b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_xenon_test.go new file mode 100644 index 00000000..896eb816 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_xenon_test.go @@ -0,0 +1,365 @@ +// +build windows,functional + +package hcsoci + +//import ( +// "fmt" +// "os" +// "path/filepath" +// "testing" + +// "github.com/Microsoft/hcsshim/internal/schemaversion" +// specs "github.com/opencontainers/runtime-spec/specs-go" +//) + +//// -------------------------------- +//// W C O W X E N O N V 2 +//// -------------------------------- + +//// A single WCOW xenon. Note in this test, neither the UVM or the +//// containers are supplied IDs - they will be autogenerated for us. +//// This is the minimum set of parameters needed to create a V2 WCOW xenon. 
+//func TestV2XenonWCOW(t *testing.T) { +// t.Skip("Skipping for now") +// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil) +// defer os.RemoveAll(uvmScratchDir) +// defer uvm.Terminate() + +// // Create the container hosted inside the utility VM +// containerScratchDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(containerScratchDir) +// layerFolders := append(layersNanoserver, containerScratchDir) +// hostedContainer, err := CreateContainer(&CreateOptions{ +// HostingSystem: uvm, +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}}, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } +// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll) + +// // Start/stop the container +// startContainer(t, hostedContainer) +// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW") +// stopContainer(t, hostedContainer) +// hostedContainer.Terminate() +//} + +//// TODO: Have a similar test where the UVM scratch folder does not exist. +//// A single WCOW xenon but where the container sandbox folder is not pre-created by the client +//func TestV2XenonWCOWContainerSandboxFolderDoesNotExist(t *testing.T) { +// t.Skip("Skipping for now") +// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWContainerSandboxFolderDoesNotExist_UVM", nil) +// defer os.RemoveAll(uvmScratchDir) +// defer uvm.Terminate() + +// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory +// containerScratchDir := createTempDir(t) +// os.RemoveAll(containerScratchDir) +// defer os.RemoveAll(containerScratchDir) // As auto-created + +// layerFolders := append(layersBusybox, containerScratchDir) +// hostedContainer, err := CreateContainer(&CreateOptions{ +// Id: "container", +// HostingSystem: uvm, +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}}, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } +// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll) + +// // Start/stop the container +// startContainer(t, hostedContainer) +// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW") +// stopContainer(t, hostedContainer) +// hostedContainer.Terminate() +//} + +//// TODO What about mount. Test with the client doing the mount. +//// TODO Test as above, but where sandbox for UVM is entirely created by a client to show how it's done. 
+ +//// Two v2 WCOW containers in the same UVM, each with a single base layer +//func TestV2XenonWCOWTwoContainers(t *testing.T) { +// t.Skip("Skipping for now") +// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWTwoContainers_UVM", nil) +// defer os.RemoveAll(uvmScratchDir) +// defer uvm.Terminate() + +// // First hosted container +// firstContainerScratchDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(firstContainerScratchDir) +// firstLayerFolders := append(layersNanoserver, firstContainerScratchDir) +// firstHostedContainer, err := CreateContainer(&CreateOptions{ +// Id: "FirstContainer", +// HostingSystem: uvm, +// SchemaVersion: schemaversion.SchemaV21(), +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: firstLayerFolders}}, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } +// defer unmountContainerLayers(firstLayerFolders, uvm, unmountOperationAll) + +// // Second hosted container +// secondContainerScratchDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(firstContainerScratchDir) +// secondLayerFolders := append(layersNanoserver, secondContainerScratchDir) +// secondHostedContainer, err := CreateContainer(&CreateOptions{ +// Id: "SecondContainer", +// HostingSystem: uvm, +// SchemaVersion: schemaversion.SchemaV21(), +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: secondLayerFolders}}, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } +// defer unmountContainerLayers(secondLayerFolders, uvm, unmountOperationAll) + +// startContainer(t, firstHostedContainer) +// runCommand(t, firstHostedContainer, "cmd /s /c echo FirstContainer", `c:\`, "FirstContainer") +// startContainer(t, secondHostedContainer) +// runCommand(t, secondHostedContainer, "cmd /s /c echo SecondContainer", `c:\`, "SecondContainer") +// stopContainer(t, firstHostedContainer) +// stopContainer(t, secondHostedContainer) +// firstHostedContainer.Terminate() +// secondHostedContainer.Terminate() +//} + +////// This verifies the container storage is unmounted correctly so that a second +////// container can be started from the same storage. 
+////func TestV2XenonWCOWWithRemount(t *testing.T) { +////// //t.Skip("Skipping for now") +//// uvmID := "Testv2XenonWCOWWithRestart_UVM" +//// uvmScratchDir, err := ioutil.TempDir("", "uvmScratch") +//// if err != nil { +//// t.Fatalf("Failed create temporary directory: %s", err) +//// } +//// if err := CreateWCOWSandbox(layersNanoserver[0], uvmScratchDir, uvmID); err != nil { +//// t.Fatalf("Failed create Windows UVM Sandbox: %s", err) +//// } +//// defer os.RemoveAll(uvmScratchDir) + +//// uvm, err := CreateContainer(&CreateOptions{ +//// Id: uvmID, +//// Owner: "unit-test", +//// SchemaVersion: SchemaV21(), +//// IsHostingSystem: true, +//// Spec: &specs.Spec{ +//// Windows: &specs.Windows{ +//// LayerFolders: []string{uvmScratchDir}, +//// HyperV: &specs.WindowsHyperV{UtilityVMPath: filepath.Join(layersNanoserver[0], `UtilityVM\Files`)}, +//// }, +//// }, +//// }) +//// if err != nil { +//// t.Fatalf("Failed create UVM: %s", err) +//// } +//// defer uvm.Terminate() +//// if err := uvm.Start(); err != nil { +//// t.Fatalf("Failed start utility VM: %s", err) +//// } + +//// // Mount the containers storage in the utility VM +//// containerScratchDir := createWCOWTempDirWithSandbox(t) +//// layerFolders := append(layersNanoserver, containerScratchDir) +//// cls, err := Mount(layerFolders, uvm, SchemaV21()) +//// if err != nil { +//// t.Fatalf("failed to mount container storage: %s", err) +//// } +//// combinedLayers := cls.(CombinedLayersV2) +//// mountedLayers := &ContainersResourcesStorageV2{ +//// Layers: combinedLayers.Layers, +//// Path: combinedLayers.ContainerRootPath, +//// } +//// defer func() { +//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil { +//// t.Fatalf("failed to unmount container storage: %s", err) +//// } +//// }() + +//// // Create the first container +//// defer os.RemoveAll(containerScratchDir) +//// xenon, err := CreateContainer(&CreateOptions{ +//// Id: "container", +//// Owner: "unit-test", +//// HostingSystem: uvm, +//// SchemaVersion: SchemaV21(), +//// Spec: &specs.Spec{Windows: &specs.Windows{}}, // No layerfolders as we mounted them ourself. +//// }) +//// if err != nil { +//// t.Fatalf("CreateContainer failed: %s", err) +//// } + +//// // Start/stop the first container +//// startContainer(t, xenon) +//// runCommand(t, xenon, "cmd /s /c echo TestV2XenonWCOWFirstStart", `c:\`, "TestV2XenonWCOWFirstStart") +//// stopContainer(t, xenon) +//// xenon.Terminate() + +//// // Now unmount and remount to exactly the same places +//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil { +//// t.Fatalf("failed to unmount container storage: %s", err) +//// } +//// if _, err = Mount(layerFolders, uvm, SchemaV21()); err != nil { +//// t.Fatalf("failed to mount container storage: %s", err) +//// } + +//// // Create an identical second container and verify it works too. 
+//// xenon2, err := CreateContainer(&CreateOptions{ +//// Id: "container", +//// Owner: "unit-test", +//// HostingSystem: uvm, +//// SchemaVersion: SchemaV21(), +//// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}}, +//// MountedLayers: mountedLayers, +//// }) +//// if err != nil { +//// t.Fatalf("CreateContainer failed: %s", err) +//// } +//// startContainer(t, xenon2) +//// runCommand(t, xenon2, "cmd /s /c echo TestV2XenonWCOWAfterRemount", `c:\`, "TestV2XenonWCOWAfterRemount") +//// stopContainer(t, xenon2) +//// xenon2.Terminate() +////} + +//// Lots of v2 WCOW containers in the same UVM, each with a single base layer. Containers aren't +//// actually started, but it stresses the SCSI controller hot-add logic. +//func TestV2XenonWCOWCreateLots(t *testing.T) { +// t.Skip("Skipping for now") +// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateLots", nil) +// defer os.RemoveAll(uvmScratchDir) +// defer uvm.Terminate() + +// // 63 as 0:0 is already taken as the UVMs scratch. So that leaves us with 64-1 left for container scratches on SCSI +// for i := 0; i < 63; i++ { +// containerScratchDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(containerScratchDir) +// layerFolders := append(layersNanoserver, containerScratchDir) +// hostedContainer, err := CreateContainer(&CreateOptions{ +// Id: fmt.Sprintf("container%d", i), +// HostingSystem: uvm, +// SchemaVersion: schemaversion.SchemaV21(), +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}}, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } +// defer hostedContainer.Terminate() +// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll) +// } + +// // TODO: Should check the internal structures here for VSMB and SCSI + +// // TODO: Push it over 63 now and will get a failure. +//} + +//// Helper for the v2 Xenon tests to create a utility VM. Returns the UtilityVM +//// object; folder used as its scratch +//func createv2WCOWUVM(t *testing.T, uvmLayers []string, uvmId string, resources *specs.WindowsResources) (*UtilityVM, string) { +// scratchDir := createTempDir(t) +// uvm := UtilityVM{ +// OperatingSystem: "windows", +// LayerFolders: append(uvmLayers, scratchDir), +// Resources: resources, +// } +// if uvmId != "" { +// uvm.Id = uvmId +// } +// if err := uvm.Create(); err != nil { +// t.Fatalf("Failed create WCOW v2 UVM: %s", err) +// } +// if err := uvm.Start(); err != nil { +// t.Fatalf("Failed start WCOW v2UVM: %s", err) + +// } +// return &uvm, scratchDir +//} + +//// TestV2XenonWCOWMultiLayer creates a V2 Xenon having multiple image layers +//func TestV2XenonWCOWMultiLayer(t *testing.T) { +// t.Skip("for now") + +// uvmMemory := uint64(1 * 1024 * 1024 * 1024) +// uvmCPUCount := uint64(2) +// resources := &specs.WindowsResources{ +// Memory: &specs.WindowsMemoryResources{ +// Limit: &uvmMemory, +// }, +// CPU: &specs.WindowsCPUResources{ +// Count: &uvmCPUCount, +// }, +// } +// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWMultiLayer_UVM", resources) +// defer os.RemoveAll(uvmScratchDir) +// defer uvm.Terminate() + +// // Create a sandbox for the hosted container +// containerScratchDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(containerScratchDir) + +// // Create the container. Note that this will auto-mount for us. 
+// containerLayers := append(layersBusybox, containerScratchDir) +// xenon, err := CreateContainer(&CreateOptions{ +// Id: "container", +// HostingSystem: uvm, +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: containerLayers}}, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } + +// // Start/stop the container +// startContainer(t, xenon) +// runCommand(t, xenon, "echo Container", `c:\`, "Container") +// stopContainer(t, xenon) +// xenon.Terminate() +// // TODO Move this to a defer function to fail if it fails. +// if err := unmountContainerLayers(containerLayers, uvm, unmountOperationAll); err != nil { +// t.Fatalf("unmount failed: %s", err) +// } + +//} + +//// TestV2XenonWCOWSingleMappedDirectory tests a V2 Xenon WCOW with a single mapped directory +//func TestV2XenonWCOWSingleMappedDirectory(t *testing.T) { +// t.Skip("Skipping for now") +// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil) +// defer os.RemoveAll(uvmScratchDir) +// defer uvm.Terminate() + +// // Create the container hosted inside the utility VM +// containerScratchDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(containerScratchDir) +// layerFolders := append(layersNanoserver, containerScratchDir) + +// // Create a temp folder containing foo.txt which will be used for the bind-mount test. +// source := createTempDir(t) +// defer os.RemoveAll(source) +// mount := specs.Mount{ +// Source: source, +// Destination: `c:\foo`, +// } +// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755) +// f.Close() + +// hostedContainer, err := CreateContainer(&CreateOptions{ +// HostingSystem: uvm, +// Spec: &specs.Spec{ +// Windows: &specs.Windows{LayerFolders: layerFolders}, +// Mounts: []specs.Mount{mount}, +// }, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } +// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll) + +// // TODO BUGBUG NEED TO UNMOUNT TO VSMB SHARE FOR THE CONTAINER + +// // Start/stop the container +// startContainer(t, hostedContainer) +// runCommand(t, hostedContainer, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt") +// stopContainer(t, hostedContainer) +// hostedContainer.Terminate() +//} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go new file mode 100644 index 00000000..b2e475f5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go @@ -0,0 +1,23 @@ +package hns + +import "fmt" + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go + +//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? 
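// Illustrative sketch (not part of the vendored file): how the helpers in this
// package compose. The exported calls funnel through hnsCall, which invokes
// vmcompute's HNSCall and unmarshals the JSON response; typed errors such as
// NetworkNotFoundError (below) let callers tell "missing" apart from "failed".
// The "nat" network name and the containerID variable are placeholders.
//
//	network, err := GetHNSNetworkByName("nat")
//	if err != nil {
//		if _, notFound := err.(NetworkNotFoundError); notFound {
//			// fall back to creating the network, or report a clearer error
//		}
//		return err
//	}
//	endpoint, err := network.CreateEndpoint(&HNSEndpoint{Name: "example-endpoint"})
//	if err != nil {
//		return err
//	}
//	// Attach the endpoint to a container (CompartmentId 0 is omitted from the request).
//	if err := endpoint.ContainerAttach(containerID, 0); err != nil {
//		return err
//	}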
+ +type EndpointNotFoundError struct { + EndpointName string +} + +func (e EndpointNotFoundError) Error() string { + return fmt.Sprintf("Endpoint %s not found", e.EndpointName) +} + +type NetworkNotFoundError struct { + NetworkName string +} + +func (e NetworkNotFoundError) Error() string { + return fmt.Sprintf("Network %s not found", e.NetworkName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go new file mode 100644 index 00000000..59ec7004 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -0,0 +1,262 @@ +package hns + +import ( + "encoding/json" + "net" + + "github.com/sirupsen/logrus" +) + +// HNSEndpoint represents a network endpoint in HNS +type HNSEndpoint struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + VirtualNetwork string `json:",omitempty"` + VirtualNetworkName string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacAddress string `json:",omitempty"` + IPAddress net.IP `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + EnableInternalDNS bool `json:",omitempty"` + DisableICC bool `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` + IsRemoteEndpoint bool `json:",omitempty"` + EnableLowMetric bool `json:",omitempty"` + Namespace *Namespace `json:",omitempty"` + EncapOverhead uint16 `json:",omitempty"` +} + +//SystemType represents the type of the system on which actions are done +type SystemType string + +// SystemType const +const ( + ContainerType SystemType = "Container" + VirtualMachineType SystemType = "VirtualMachine" + HostType SystemType = "Host" +) + +// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type EndpointAttachDetachRequest struct { + ContainerID string `json:"ContainerId,omitempty"` + SystemType SystemType `json:"SystemType"` + CompartmentID uint16 `json:"CompartmentId,omitempty"` + VirtualNICName string `json:"VirtualNicName,omitempty"` +} + +// EndpointResquestResponse is object to get the endpoint request response +type EndpointResquestResponse struct { + Success bool + Error string +} + +// HNSEndpointRequest makes a HNS call to modify/query a network endpoint +func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { + endpoint := &HNSEndpoint{} + err := hnsCall(method, "/endpoints/"+path, request, &endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// HNSListEndpointRequest makes a HNS call to query the list of available endpoints +func HNSListEndpointRequest() ([]HNSEndpoint, error) { + var endpoint []HNSEndpoint + err := hnsCall("GET", "/endpoints/", "", &endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// GetHNSEndpointByID get the Endpoint by ID +func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { + return HNSEndpointRequest("GET", endpointID, "") +} + +// GetHNSEndpointByName gets the endpoint filtered by Name +func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { + hnsResponse, err := HNSListEndpointRequest() + if err != nil { + return nil, err + } + for _, hnsEndpoint := range hnsResponse { + if hnsEndpoint.Name == endpointName { + return &hnsEndpoint, nil + } + } + return nil, EndpointNotFoundError{EndpointName: 
endpointName} +} + +// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods +func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { + operation := "Create" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + return HNSEndpointRequest("POST", "", string(jsonString)) +} + +// Delete Endpoint by sending EndpointRequest to HNS +func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { + operation := "Delete" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + return HNSEndpointRequest("DELETE", endpoint.Id, "") +} + +// Update Endpoint +func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { + operation := "Update" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) + + return endpoint, err +} + +// ApplyACLPolicy applies a set of ACL Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { + operation := "ApplyACLPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ContainerAttach attaches an endpoint to container +func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { + operation := "ContainerAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + CompartmentID: compartmentID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// ContainerDetach detaches an endpoint from container +func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { + operation := "ContainerDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// HostAttach attaches a nic on the host +func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { + operation := "HostAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + CompartmentID: compartmentID, + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) + +} + +// HostDetach detaches a nic on the host 
+func (endpoint *HNSEndpoint) HostDetach() error { + operation := "HostDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// VirtualMachineNICAttach attaches a endpoint to a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { + operation := "VirtualMachineNicAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + VirtualNICName: virtualMachineNICName, + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// VirtualMachineNICDetach detaches a endpoint from a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { + operation := "VirtualMachineNicDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go new file mode 100644 index 00000000..969d1b26 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -0,0 +1,42 @@ +package hns + +import ( + "encoding/json" + "fmt" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +func hnsCall(method, path, request string, returnResponse interface{}) error { + var responseBuffer *uint16 + logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) + + err := _hnsCall(method, path, request, &responseBuffer) + if err != nil { + return hcserror.New(err, "hnsCall ", "") + } + response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) + + hnsresponse := &hnsResponse{} + if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { + return err + } + + if !hnsresponse.Success { + return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) + } + + if len(hnsresponse.Output) == 0 { + return nil + } + + logrus.Debugf("Network Response : %s", hnsresponse.Output) + err = json.Unmarshal(hnsresponse.Output, returnResponse) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go new file mode 100644 index 00000000..a8d8cc56 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go @@ -0,0 +1,28 @@ +package hns + +type HNSGlobals struct { + Version HNSVersion `json:"Version"` +} + +type HNSVersion struct { + Major int `json:"Major"` + Minor int `json:"Minor"` +} + +var ( + HNSVersion1803 = HNSVersion{Major: 7, Minor: 2} +) + +func GetHNSGlobals() 
(*HNSGlobals, error) { + var version HNSVersion + err := hnsCall("GET", "/globals/version", "", &version) + if err != nil { + return nil, err + } + + globals := &HNSGlobals{ + Version: version, + } + + return globals, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go new file mode 100644 index 00000000..7e859de9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go @@ -0,0 +1,141 @@ +package hns + +import ( + "encoding/json" + "net" + + "github.com/sirupsen/logrus" +) + +// Subnet is assoicated with a network and represents a list +// of subnets available to the network +type Subnet struct { + AddressPrefix string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` +} + +// MacPool is assoicated with a network and represents a list +// of macaddresses available to the network +type MacPool struct { + StartMacAddress string `json:",omitempty"` + EndMacAddress string `json:",omitempty"` +} + +// HNSNetwork represents a network in HNS +type HNSNetwork struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + Type string `json:",omitempty"` + NetworkAdapterName string `json:",omitempty"` + SourceMac string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacPools []MacPool `json:",omitempty"` + Subnets []Subnet `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + DNSServerCompartment uint32 `json:",omitempty"` + ManagementIP string `json:",omitempty"` + AutomaticDNS bool `json:",omitempty"` +} + +type hnsNetworkResponse struct { + Success bool + Error string + Output HNSNetwork +} + +type hnsResponse struct { + Success bool + Error string + Output json.RawMessage +} + +// HNSNetworkRequest makes a call into HNS to update/query a single network +func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { + var network HNSNetwork + err := hnsCall(method, "/networks/"+path, request, &network) + if err != nil { + return nil, err + } + + return &network, nil +} + +// HNSListNetworkRequest makes a HNS call to query the list of available networks +func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { + var network []HNSNetwork + err := hnsCall(method, "/networks/"+path, request, &network) + if err != nil { + return nil, err + } + + return network, nil +} + +// GetHNSNetworkByID +func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { + return HNSNetworkRequest("GET", networkID, "") +} + +// GetHNSNetworkName filtered by Name +func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { + hsnnetworks, err := HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + for _, hnsnetwork := range hsnnetworks { + if hnsnetwork.Name == networkName { + return &hnsnetwork, nil + } + } + return nil, NetworkNotFoundError{NetworkName: networkName} +} + +// Create Network by sending NetworkRequest to HNS. 
+func (network *HNSNetwork) Create() (*HNSNetwork, error) { + operation := "Create" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + return HNSNetworkRequest("POST", "", string(jsonString)) +} + +// Delete Network by sending NetworkRequest to HNS +func (network *HNSNetwork) Delete() (*HNSNetwork, error) { + operation := "Delete" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + return HNSNetworkRequest("DELETE", network.Id, "") +} + +// Creates an endpoint on the Network. +func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { + return &HNSEndpoint{ + VirtualNetwork: network.Id, + IPAddress: ipAddress, + MacAddress: string(macAddress), + } +} + +func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) + + endpoint.VirtualNetwork = network.Id + return endpoint.Create() +} + +func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateRemoteEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + endpoint.IsRemoteEndpoint = true + return network.CreateEndpoint(endpoint) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go new file mode 100644 index 00000000..2318a4fc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -0,0 +1,98 @@ +package hns + +// Type of Request Support in ModifySystem +type PolicyType string + +// RequestType const +const ( + Nat PolicyType = "NAT" + ACL PolicyType = "ACL" + PA PolicyType = "PA" + VLAN PolicyType = "VLAN" + VSID PolicyType = "VSID" + VNet PolicyType = "VNET" + L2Driver PolicyType = "L2Driver" + Isolation PolicyType = "Isolation" + QOS PolicyType = "QOS" + OutboundNat PolicyType = "OutBoundNAT" + ExternalLoadBalancer PolicyType = "ELB" + Route PolicyType = "ROUTE" +) + +type NatPolicy struct { + Type PolicyType `json:"Type"` + Protocol string + InternalPort uint16 + ExternalPort uint16 +} + +type QosPolicy struct { + Type PolicyType `json:"Type"` + MaximumOutgoingBandwidthInBytes uint64 +} + +type IsolationPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint + VSID uint + InDefaultIsolation bool +} + +type VlanPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint +} + +type VsidPolicy struct { + Type PolicyType `json:"Type"` + VSID uint +} + +type PaPolicy struct { + Type PolicyType `json:"Type"` + PA string `json:"PA"` +} + +type OutboundNatPolicy struct { + Policy + VIP string `json:"VIP,omitempty"` + Exceptions []string `json:"ExceptionList,omitempty"` +} + +type ActionType string +type DirectionType string +type RuleType string + +const ( + Allow ActionType = "Allow" + Block ActionType = "Block" + + In DirectionType = "In" + Out DirectionType = "Out" + + Host RuleType = "Host" + Switch RuleType = "Switch" +) + +type ACLPolicy struct { + Type PolicyType `json:"Type"` + Id string `json:"Id,omitempty"` + Protocol uint16 + Protocols string `json:"Protocols,omitempty"` + InternalPort uint16 + Action ActionType + Direction DirectionType + LocalAddresses string + RemoteAddresses string + LocalPorts string 
`json:"LocalPorts,omitempty"` + LocalPort uint16 + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 + ServiceName string +} + +type Policy struct { + Type PolicyType `json:"Type"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go new file mode 100644 index 00000000..31322a68 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go @@ -0,0 +1,201 @@ +package hns + +import ( + "encoding/json" + + "github.com/sirupsen/logrus" +) + +// RoutePolicy is a structure defining schema for Route based Policy +type RoutePolicy struct { + Policy + DestinationPrefix string `json:"DestinationPrefix,omitempty"` + NextHop string `json:"NextHop,omitempty"` + EncapEnabled bool `json:"NeedEncap,omitempty"` +} + +// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy +type ELBPolicy struct { + LBPolicy + SourceVIP string `json:"SourceVIP,omitempty"` + VIPs []string `json:"VIPs,omitempty"` + ILB bool `json:"ILB,omitempty"` + DSR bool `json:"IsDSR,omitempty"` +} + +// LBPolicy is a structure defining schema for LoadBalancing based Policy +type LBPolicy struct { + Policy + Protocol uint16 `json:"Protocol,omitempty"` + InternalPort uint16 + ExternalPort uint16 +} + +// PolicyList is a structure defining schema for Policy list request +type PolicyList struct { + ID string `json:"ID,omitempty"` + EndpointReferences []string `json:"References,omitempty"` + Policies []json.RawMessage `json:"Policies,omitempty"` +} + +// HNSPolicyListRequest makes a call into HNS to update/query a single network +func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { + var policy PolicyList + err := hnsCall(method, "/policylists/"+path, request, &policy) + if err != nil { + return nil, err + } + + return &policy, nil +} + +// HNSListPolicyListRequest gets all the policy list +func HNSListPolicyListRequest() ([]PolicyList, error) { + var plist []PolicyList + err := hnsCall("GET", "/policylists/", "", &plist) + if err != nil { + return nil, err + } + + return plist, nil +} + +// PolicyListRequest makes a HNS call to modify/query a network policy list +func PolicyListRequest(method, path, request string) (*PolicyList, error) { + policylist := &PolicyList{} + err := hnsCall(method, "/policylists/"+path, request, &policylist) + if err != nil { + return nil, err + } + + return policylist, nil +} + +// GetPolicyListByID get the policy list by ID +func GetPolicyListByID(policyListID string) (*PolicyList, error) { + return PolicyListRequest("GET", policyListID, "") +} + +// Create PolicyList by sending PolicyListRequest to HNS. 
+func (policylist *PolicyList) Create() (*PolicyList, error) { + operation := "Create" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s", policylist.ID) + jsonString, err := json.Marshal(policylist) + if err != nil { + return nil, err + } + return PolicyListRequest("POST", "", string(jsonString)) +} + +// Delete deletes PolicyList +func (policylist *PolicyList) Delete() (*PolicyList, error) { + operation := "Delete" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s", policylist.ID) + + return PolicyListRequest("DELETE", policylist.ID, "") +} + +// AddEndpoint add an endpoint to a Policy List +func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { + operation := "AddEndpoint" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) + + _, err := policylist.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + + return policylist.Create() +} + +// RemoveEndpoint removes an endpoint from the Policy List +func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { + operation := "RemoveEndpoint" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) + + _, err := policylist.Delete() + if err != nil { + return nil, err + } + + elementToRemove := "/endpoints/" + endpoint.Id + + var references []string + + for _, endpointReference := range policylist.EndpointReferences { + if endpointReference == elementToRemove { + continue + } + references = append(references, endpointReference) + } + policylist.EndpointReferences = references + return policylist.Create() +} + +// AddLoadBalancer policy list for the specified endpoints +func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { + operation := "AddLoadBalancer" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) + + policylist := &PolicyList{} + + elbPolicy := &ELBPolicy{ + SourceVIP: sourceVIP, + ILB: isILB, + } + + if len(vip) > 0 { + elbPolicy.VIPs = []string{vip} + } + elbPolicy.Type = ExternalLoadBalancer + elbPolicy.Protocol = protocol + elbPolicy.InternalPort = internalPort + elbPolicy.ExternalPort = externalPort + + for _, endpoint := range endpoints { + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(elbPolicy) + if err != nil { + return nil, err + } + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} + +// AddRoute adds route policy list for the specified endpoints +func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { + operation := "AddRoute" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix) + + policylist := &PolicyList{} + + rPolicy := &RoutePolicy{ + DestinationPrefix: destinationPrefix, + NextHop: nextHop, + EncapEnabled: encapEnabled, + } + rPolicy.Type = Route + + for _, endpoint := range endpoints { + 
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(rPolicy) + if err != nil { + return nil, err + } + + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go new file mode 100644 index 00000000..d5efba7f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go @@ -0,0 +1,49 @@ +package hns + +import ( + "github.com/sirupsen/logrus" +) + +type HNSSupportedFeatures struct { + Acl HNSAclFeatures `json:"ACL"` +} + +type HNSAclFeatures struct { + AclAddressLists bool `json:"AclAddressLists"` + AclNoHostRulePriority bool `json:"AclHostRulePriority"` + AclPortRanges bool `json:"AclPortRanges"` + AclRuleId bool `json:"AclRuleId"` +} + +func GetHNSSupportedFeatures() HNSSupportedFeatures { + var hnsFeatures HNSSupportedFeatures + + globals, err := GetHNSGlobals() + if err != nil { + // Expected on pre-1803 builds, all features will be false/unsupported + logrus.Debugf("Unable to obtain HNS globals: %s", err) + return hnsFeatures + } + + hnsFeatures.Acl = HNSAclFeatures{ + AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclRuleId: isHNSFeatureSupported(globals.Version, HNSVersion1803), + } + + return hnsFeatures +} + +func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool { + if currentVersion.Major < minVersionSupported.Major { + return false + } + if currentVersion.Major > minVersionSupported.Major { + return true + } + if currentVersion.Minor < minVersionSupported.Minor { + return false + } + return true +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go new file mode 100644 index 00000000..45e2281b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -0,0 +1,110 @@ +package hns + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strings" +) + +type namespaceRequest struct { + IsDefault bool `json:",omitempty"` +} + +type namespaceEndpointRequest struct { + ID string `json:"Id"` +} + +type NamespaceResource struct { + Type string + Data json.RawMessage +} + +type namespaceResourceRequest struct { + Type string + Data interface{} +} + +type Namespace struct { + ID string + IsDefault bool `json:",omitempty"` + ResourceList []NamespaceResource `json:",omitempty"` +} + +func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { + var err error + hnspath := "/namespaces/" + if id != nil { + hnspath = path.Join(hnspath, *id) + } + if subpath != "" { + hnspath = path.Join(hnspath, subpath) + } + var reqJSON []byte + if request != nil { + if reqJSON, err = json.Marshal(request); err != nil { + return nil, err + } + } + var ns Namespace + err = hnsCall(method, hnspath, string(reqJSON), &ns) + if err != nil { + if strings.Contains(err.Error(), "Element not found.") { + return nil, os.ErrNotExist + } + return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) + } + return &ns, err +} + +func CreateNamespace() (string, error) { + req := namespaceRequest{} + ns, err := issueNamespaceRequest(nil, "POST", "", &req) + if err != nil { + 
return "", err + } + return ns.ID, nil +} + +func RemoveNamespace(id string) error { + _, err := issueNamespaceRequest(&id, "DELETE", "", nil) + return err +} + +func GetNamespaceEndpoints(id string) ([]string, error) { + ns, err := issueNamespaceRequest(&id, "GET", "", nil) + if err != nil { + return nil, err + } + var endpoints []string + for _, rsrc := range ns.ResourceList { + if rsrc.Type == "Endpoint" { + var endpoint namespaceEndpointRequest + err = json.Unmarshal(rsrc.Data, &endpoint) + if err != nil { + return nil, fmt.Errorf("unmarshal endpoint: %s", err) + } + endpoints = append(endpoints, endpoint.ID) + } + } + return endpoints, nil +} + +func AddNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "addresource", &resource) + return err +} + +func RemoveNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go new file mode 100644 index 00000000..204633a4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go @@ -0,0 +1,76 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hns + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHNSCall = modvmcompute.NewProc("HNSCall") +) + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + if hr = procHNSCall.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go new file mode 100644 index 00000000..2f6ec029 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -0,0 +1,27 @@ +package interop + +import ( + "syscall" + "unsafe" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go + +//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree + +func ConvertAndFreeCoTaskMemString(buffer *uint16) string { + str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) + coTaskMemFree(unsafe.Pointer(buffer)) + return str +} + +func ConvertAndFreeCoTaskMemBytes(buffer *uint16) []byte { + return []byte(ConvertAndFreeCoTaskMemString(buffer)) +} + +func Win32FromHresult(hr uintptr) syscall.Errno { + if hr&0x1fff0000 == 0x00070000 { + return syscall.Errno(hr & 0xffff) + } + return syscall.Errno(hr) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go new file mode 100644 index 00000000..12b0c71c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -0,0 +1,48 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package interop + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") + + procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") +) + +func coTaskMemFree(buffer unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/lcow/constants.go b/vendor/github.com/Microsoft/hcsshim/internal/lcow/constants.go new file mode 100644 index 00000000..08f4e30f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/lcow/constants.go @@ -0,0 +1,9 @@ +package lcow + +const ( + // DefaultScratchSizeGB is the size of the default LCOW scratch disk in GB + DefaultScratchSizeGB = 20 + + // defaultVhdxBlockSizeMB is the block-size for the scratch VHDx's this package can create. + defaultVhdxBlockSizeMB = 1 +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/lcow/debug.go b/vendor/github.com/Microsoft/hcsshim/internal/lcow/debug.go new file mode 100644 index 00000000..ddb0cef8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/lcow/debug.go @@ -0,0 +1,55 @@ +package lcow + +//func debugCommand(s string) string { +// return fmt.Sprintf(`echo -e 'DEBUG COMMAND: %s\\n--------------\\n';%s;echo -e '\\n\\n';`, s, s) +//} + +// DebugLCOWGCS extracts logs from the GCS in LCOW. It's a useful hack for debugging, +// but not necessarily optimal, but all that is available to us in RS3. +//func (container *container) DebugLCOWGCS() { +// if logrus.GetLevel() < logrus.DebugLevel || len(os.Getenv("HCSSHIM_LCOW_DEBUG_ENABLE")) == 0 { +// return +// } + +// var out bytes.Buffer +// cmd := os.Getenv("HCSSHIM_LCOW_DEBUG_COMMAND") +// if cmd == "" { +// cmd = `sh -c "` +// cmd += debugCommand("kill -10 `pidof gcs`") // SIGUSR1 for stackdump +// cmd += debugCommand("ls -l /tmp") +// cmd += debugCommand("cat /tmp/gcs.log") +// cmd += debugCommand("cat /tmp/gcs/gcs-stacks*") +// cmd += debugCommand("cat /tmp/gcs/paniclog*") +// cmd += debugCommand("ls -l /tmp/gcs") +// cmd += debugCommand("ls -l /tmp/gcs/*") +// cmd += debugCommand("cat /tmp/gcs/*/config.json") +// cmd += debugCommand("ls -lR /var/run/gcsrunc") +// cmd += debugCommand("cat /tmp/gcs/global-runc.log") +// cmd += debugCommand("cat /tmp/gcs/*/runc.log") +// cmd += debugCommand("ps -ef") +// cmd += `"` +// } + +// proc, _, err := container.CreateProcessEx( +// &CreateProcessEx{ +// OCISpecification: &specs.Spec{ +// Process: &specs.Process{Args: []string{cmd}}, +// Linux: &specs.Linux{}, +// }, +// CreateInUtilityVm: true, +// Stdout: &out, +// }) +// defer func() { +// if proc != nil { +// proc.Kill() +// proc.Close() +// } +// }() +// if err != nil { +// logrus.Debugln("benign failure getting gcs logs: ", err) +// } +// if proc != nil { +// proc.WaitTimeout(time.Duration(int(time.Second) * 30)) +// } +// logrus.Debugf("GCS Debugging:\n%s\n\nEnd GCS Debugging", strings.TrimSpace(out.String())) +//} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/lcow/process.go b/vendor/github.com/Microsoft/hcsshim/internal/lcow/process.go new file mode 100644 index 00000000..b5df7827 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/lcow/process.go @@ -0,0 +1,161 @@ +package lcow + +import ( + "fmt" + "io" + "strings" + "time" + + "github.com/Microsoft/hcsshim/internal/copywithtimeout" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/schema2" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" 
+) + +// ByteCounts are the number of bytes copied to/from standard handles. Note +// this is int64 rather than uint64 to match the golang io.Copy() signature. +type ByteCounts struct { + In int64 + Out int64 + Err int64 +} + +// ProcessOptions are the set of options which are passed to CreateProcessEx() to +// create a utility vm. +type ProcessOptions struct { + HCSSystem *hcs.System + Process *specs.Process + Stdin io.Reader // Optional reader for sending on to the processes stdin stream + Stdout io.Writer // Optional writer for returning the processes stdout stream + Stderr io.Writer // Optional writer for returning the processes stderr stream + CopyTimeout time.Duration // Timeout for the copy + CreateInUtilityVm bool // If the compute system is a utility VM + ByteCounts ByteCounts // How much data to copy on each stream if they are supplied. 0 means to io.EOF. +} + +// CreateProcess creates a process either in an LCOW utility VM, or for starting +// the init process. TODO: Potentially extend for exec'd processes. +// +// It's essentially a glorified wrapper around hcs.ComputeSystem CreateProcess used +// for internal purposes. +// +// This is used on LCOW to run processes for remote filesystem commands, utilities, +// and debugging. +// +// It optional performs IO copies with timeout between the pipes provided as input, +// and the pipes in the process. +// +// In the ProcessOptions structure, if byte-counts are non-zero, a maximum of those +// bytes are copied to the appropriate standard IO reader/writer. When zero, +// it copies until EOF. It also returns byte-counts indicating how much data +// was sent/received from the process. +// +// It is the responsibility of the caller to call Close() on the process returned. + +func CreateProcess(opts *ProcessOptions) (*hcs.Process, *ByteCounts, error) { + + var environment = make(map[string]string) + copiedByteCounts := &ByteCounts{} + + if opts == nil { + return nil, nil, fmt.Errorf("no options supplied") + } + + if opts.HCSSystem == nil { + return nil, nil, fmt.Errorf("no HCS system supplied") + } + + if opts.CreateInUtilityVm && opts.Process == nil { + return nil, nil, fmt.Errorf("process must be supplied for UVM process") + } + + // Don't pass a process in if this is an LCOW container. This will start the init process. + if opts.Process != nil { + for _, v := range opts.Process.Env { + s := strings.SplitN(v, "=", 2) + if len(s) == 2 && len(s[1]) > 0 { + environment[s[0]] = s[1] + } + } + if _, ok := environment["PATH"]; !ok { + environment["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" + } + } + + processConfig := &ProcessParameters{ + ProcessParameters: hcsschema.ProcessParameters{ + CreateStdInPipe: (opts.Stdin != nil), + CreateStdOutPipe: (opts.Stdout != nil), + CreateStdErrPipe: (opts.Stderr != nil), + EmulateConsole: false, + }, + CreateInUtilityVm: opts.CreateInUtilityVm, + } + + if opts.Process != nil { + processConfig.Environment = environment + processConfig.CommandLine = strings.Join(opts.Process.Args, " ") + processConfig.WorkingDirectory = opts.Process.Cwd + if processConfig.WorkingDirectory == "" { + processConfig.WorkingDirectory = `/` + } + } + + proc, err := opts.HCSSystem.CreateProcess(processConfig) + if err != nil { + logrus.Debugf("failed to create process: %s", err) + return nil, nil, err + } + + processStdin, processStdout, processStderr, err := proc.Stdio() + if err != nil { + proc.Kill() // Should this have a timeout? 
+ proc.Close() + return nil, nil, fmt.Errorf("failed to get stdio pipes for process %+v: %s", processConfig, err) + } + + // Send the data into the process's stdin + if opts.Stdin != nil { + if copiedByteCounts.In, err = copywithtimeout.Copy(processStdin, + opts.Stdin, + opts.ByteCounts.In, + "stdin", + opts.CopyTimeout); err != nil { + return nil, nil, err + } + + // Don't need stdin now we've sent everything. This signals GCS that we are finished sending data. + if err := proc.CloseStdin(); err != nil && !hcs.IsNotExist(err) && !hcs.IsAlreadyClosed(err) { + // This error will occur if the compute system is currently shutting down + if perr, ok := err.(*hcs.ProcessError); ok && perr.Err != hcs.ErrVmcomputeOperationInvalidState { + return nil, nil, err + } + } + } + + // Copy the data back from stdout + if opts.Stdout != nil { + // Copy the data over to the writer. + if copiedByteCounts.Out, err = copywithtimeout.Copy(opts.Stdout, + processStdout, + opts.ByteCounts.Out, + "stdout", + opts.CopyTimeout); err != nil { + return nil, nil, err + } + } + + // Copy the data back from stderr + if opts.Stderr != nil { + // Copy the data over to the writer. + if copiedByteCounts.Err, err = copywithtimeout.Copy(opts.Stderr, + processStderr, + opts.ByteCounts.Err, + "stderr", + opts.CopyTimeout); err != nil { + return nil, nil, err + } + } + return proc, copiedByteCounts, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go b/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go new file mode 100644 index 00000000..82f8b01b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go @@ -0,0 +1,168 @@ +package lcow + +import ( + "bytes" + "fmt" + "os" + "strings" + "time" + + "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/internal/copyfile" + "github.com/Microsoft/hcsshim/internal/timeout" + "github.com/Microsoft/hcsshim/internal/uvm" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +// CreateScratch uses a utility VM to create an empty scratch disk of a requested size. +// It has a caching capability. If the cacheFile exists, and the request is for a default +// size, a copy of that is made to the target. If the size is non-default, or the cache file +// does not exist, it uses a utility VM to create target. It is the responsibility of the +// caller to synchronise simultaneous attempts to create the cache file. +func CreateScratch(lcowUVM *uvm.UtilityVM, destFile string, sizeGB uint32, cacheFile string, vmID string) error { + + if lcowUVM == nil { + return fmt.Errorf("no uvm") + } + + if lcowUVM.OS() != "linux" { + return fmt.Errorf("CreateLCOWScratch requires a linux utility VM to operate!") + } + + // Smallest we can accept is the default scratch size as we can't size down, only expand. 
+ if sizeGB < DefaultScratchSizeGB { + sizeGB = DefaultScratchSizeGB + } + + logrus.Debugf("hcsshim::CreateLCOWScratch: Dest:%s size:%dGB cache:%s", destFile, sizeGB, cacheFile) + + // Retrieve from cache if the default size and already on disk + if cacheFile != "" && sizeGB == DefaultScratchSizeGB { + if _, err := os.Stat(cacheFile); err == nil { + if err := copyfile.CopyFile(cacheFile, destFile, false); err != nil { + return fmt.Errorf("failed to copy cached file '%s' to '%s': %s", cacheFile, destFile, err) + } + logrus.Debugf("hcsshim::CreateLCOWScratch: %s fulfilled from cache (%s)", destFile, cacheFile) + return nil + } + } + + // Create the VHDX + if err := vhd.CreateVhdx(destFile, sizeGB, defaultVhdxBlockSizeMB); err != nil { + return fmt.Errorf("failed to create VHDx %s: %s", destFile, err) + } + + controller, lun, err := lcowUVM.AddSCSI(destFile, "", false) // No destination as not formatted + if err != nil { + return err + } + + logrus.Debugf("hcsshim::CreateLCOWScratch: %s at C=%d L=%d", destFile, controller, lun) + + // Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory + + startTime := time.Now() + for { + testdCommand := []string{"test", "-d", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d", controller, lun)} + testdProc, _, err := CreateProcess(&ProcessOptions{ + HCSSystem: lcowUVM.ComputeSystem(), + CreateInUtilityVm: true, + CopyTimeout: timeout.ExternalCommandToStart, + Process: &specs.Process{Args: testdCommand}, + }) + if err != nil { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("failed to run %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err) + } + defer testdProc.Close() + + testdProc.WaitTimeout(timeout.ExternalCommandToComplete) + testdExitCode, err := testdProc.ExitCode() + if err != nil { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("failed to get exit code from from %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err) + } + if testdExitCode != 0 { + currentTime := time.Now() + elapsedTime := currentTime.Sub(startTime) + if elapsedTime > timeout.TestDRetryLoop { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM", testdCommand, testdExitCode, destFile) + } + } else { + break + } + time.Sleep(time.Millisecond * 10) + } + + // Get the device from under the block subdirectory by doing a simple ls. 
This will come back as (eg) `sda` + var lsOutput bytes.Buffer + lsCommand := []string{"ls", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", controller, lun)} + lsProc, _, err := CreateProcess(&ProcessOptions{ + HCSSystem: lcowUVM.ComputeSystem(), + CreateInUtilityVm: true, + CopyTimeout: timeout.ExternalCommandToStart, + Process: &specs.Process{Args: lsCommand}, + Stdout: &lsOutput, + }) + + if err != nil { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err) + } + defer lsProc.Close() + lsProc.WaitTimeout(timeout.ExternalCommandToComplete) + lsExitCode, err := lsProc.ExitCode() + if err != nil { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err) + } + if lsExitCode != 0 { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM", lsCommand, lsExitCode, destFile) + } + device := fmt.Sprintf(`/dev/%s`, strings.TrimSpace(lsOutput.String())) + logrus.Debugf("hcsshim: CreateExt4Vhdx: %s: device at %s", destFile, device) + + // Format it ext4 + mkfsCommand := []string{"mkfs.ext4", "-q", "-E", "lazy_itable_init=1", "-O", `^has_journal,sparse_super2,uninit_bg,^resize_inode`, device} + var mkfsStderr bytes.Buffer + mkfsProc, _, err := CreateProcess(&ProcessOptions{ + HCSSystem: lcowUVM.ComputeSystem(), + CreateInUtilityVm: true, + CopyTimeout: timeout.ExternalCommandToStart, + Process: &specs.Process{Args: mkfsCommand}, + Stderr: &mkfsStderr, + }) + if err != nil { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err) + } + defer mkfsProc.Close() + mkfsProc.WaitTimeout(timeout.ExternalCommandToComplete) + mkfsExitCode, err := mkfsProc.ExitCode() + if err != nil { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err) + } + if mkfsExitCode != 0 { + lcowUVM.RemoveSCSI(destFile) + return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM: %s", mkfsCommand, mkfsExitCode, destFile, strings.TrimSpace(mkfsStderr.String())) + } + + // Hot-Remove before we copy it + if err := lcowUVM.RemoveSCSI(destFile); err != nil { + return fmt.Errorf("failed to hot-remove: %s", err) + } + + // Populate the cache. 
+ if cacheFile != "" && (sizeGB == DefaultScratchSizeGB) { + if err := copyfile.CopyFile(destFile, cacheFile, true); err != nil { + return fmt.Errorf("failed to seed cache '%s' from '%s': %s", destFile, cacheFile, err) + } + } + + logrus.Debugf("hcsshim::CreateLCOWScratch: %s created (non-cache)", destFile) + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/lcow/tar2vhd.go b/vendor/github.com/Microsoft/hcsshim/internal/lcow/tar2vhd.go new file mode 100644 index 00000000..08d90a73 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/lcow/tar2vhd.go @@ -0,0 +1,46 @@ +package lcow + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/Microsoft/hcsshim/internal/uvm" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +// TarToVhd streams a tarstream contained in an io.Reader to a fixed vhd file +func TarToVhd(lcowUVM *uvm.UtilityVM, targetVHDFile string, reader io.Reader) (int64, error) { + logrus.Debugf("hcsshim: TarToVhd: %s", targetVHDFile) + + if lcowUVM == nil { + return 0, fmt.Errorf("no utility VM passed") + } + + //defer uvm.DebugLCOWGCS() + + outFile, err := os.Create(targetVHDFile) + if err != nil { + return 0, fmt.Errorf("tar2vhd failed to create %s: %s", targetVHDFile, err) + } + defer outFile.Close() + // BUGBUG Delete the file on failure + + tar2vhd, byteCounts, err := CreateProcess(&ProcessOptions{ + HCSSystem: lcowUVM.ComputeSystem(), + Process: &specs.Process{Args: []string{"tar2vhd"}}, + CreateInUtilityVm: true, + Stdin: reader, + Stdout: outFile, + CopyTimeout: 2 * time.Minute, + }) + if err != nil { + return 0, fmt.Errorf("failed to start tar2vhd for %s: %s", targetVHDFile, err) + } + defer tar2vhd.Close() + + logrus.Debugf("hcsshim: TarToVhd: %s created, %d bytes", targetVHDFile, byteCounts.Out) + return byteCounts.Out, err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/lcow/types.go b/vendor/github.com/Microsoft/hcsshim/internal/lcow/types.go new file mode 100644 index 00000000..c490518c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/lcow/types.go @@ -0,0 +1,11 @@ +package lcow + +import "github.com/Microsoft/hcsshim/internal/schema2" + +// Additional fields to hcsschema.ProcessParameters used by LCOW +type ProcessParameters struct { + hcsschema.ProcessParameters + + CreateInUtilityVm bool `json:",omitempty"` + OCIProcess interface{} `json:"OciProcess,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/lcow/vhd2tar.go b/vendor/github.com/Microsoft/hcsshim/internal/lcow/vhd2tar.go new file mode 100644 index 00000000..e446b71b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/lcow/vhd2tar.go @@ -0,0 +1,75 @@ +package lcow + +import ( + "fmt" + "io" + // "os" + + "github.com/Microsoft/hcsshim/internal/uvm" + // specs "github.com/opencontainers/runtime-spec/specs-go" + // "github.com/sirupsen/logrus" +) + +// VhdToTar does what is says - it exports a VHD in a specified +// folder (either a read-only layer.vhd, or a read-write scratch vhdx) to a +// ReadCloser containing a tar-stream of the layers contents. 
+func VhdToTar(lcowUVM *uvm.UtilityVM, vhdFile string, uvmMountPath string, isContainerScratch bool, vhdSize int64) (io.ReadCloser, error) { + return nil, fmt.Errorf("not implemented yet") + // logrus.Debugf("hcsshim: VhdToTar: %s isScratch: %t", vhdFile, isContainerScratch) + + // if lcowUVM == nil { + // return nil, fmt.Errorf("cannot VhdToTar as no utility VM is in configuration") + // } + + // //defer uvm.DebugLCOWGCS() + + // vhdHandle, err := os.Open(vhdFile) + // if err != nil { + // return nil, fmt.Errorf("hcsshim: VhdToTar: failed to open %s: %s", vhdFile, err) + // } + // defer vhdHandle.Close() + // logrus.Debugf("hcsshim: VhdToTar: exporting %s, size %d, isScratch %t", vhdHandle.Name(), vhdSize, isContainerScratch) + + // // Different binary depending on whether a RO layer or a RW scratch + // command := "vhd2tar" + // if isContainerScratch { + // command = fmt.Sprintf("exportSandbox -path %s", uvmMountPath) + // } + + // // tar2vhd, byteCounts, err := lcowUVM.CreateProcess(&uvm.ProcessOptions{ + // // Process: &specs.Process{Args: []string{"tar2vhd"}}, + // // Stdin: reader, + // // Stdout: outFile, + // // }) + + // // Start the binary in the utility VM + // proc, stdin, stdout, _, err := config.createLCOWUVMProcess(command) + // if err != nil { + // return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to create utils process %s: %s", vhdHandle.Name(), command, err) + // } + + // if !isContainerScratch { + // // Send the VHD contents to the utility VM processes stdin handle if not a container scratch + // logrus.Debugf("hcsshim: VhdToTar: copying the layer VHD into the utility VM") + // if _, err = copyWithTimeout(stdin, vhdHandle, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: sending %s to %s", vhdHandle.Name(), command)); err != nil { + // proc.Close() + // return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to copyWithTimeout on the stdin pipe (to utility VM): %s", vhdHandle.Name(), err) + // } + // } + + // // Start a goroutine which copies the stdout (ie the tar stream) + // reader, writer := io.Pipe() + // go func() { + // defer writer.Close() + // defer proc.Close() + // logrus.Debugf("hcsshim: VhdToTar: copying tar stream back from the utility VM") + // bytes, err := copyWithTimeout(writer, stdout, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: copy tarstream from %s", command)) + // if err != nil { + // logrus.Errorf("hcsshim: VhdToTar: %s: copyWithTimeout on the stdout pipe (from utility VM) failed: %s", vhdHandle.Name(), err) + // } + // logrus.Debugf("hcsshim: VhdToTar: copied %d bytes of the tarstream of %s from the utility VM", bytes, vhdHandle.Name()) + // }() + + // // Return the read-side of the pipe connected to the goroutine which is reading from the stdout of the process in the utility VM + // return reader, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go new file mode 100644 index 00000000..cf2c166d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -0,0 +1,32 @@ +package logfields + +const ( + // Identifiers + + ContainerID = "cid" + UVMID = "uvm-id" + ProcessID = "pid" + + // Common Misc + + // Timeout represents an operation timeout. 
+ Timeout = "timeout" + JSON = "json" + + // Keys/values + + Field = "field" + OCIAnnotation = "oci-annotation" + Value = "value" + + // Golang type's + + ExpectedType = "expected-type" + Bool = "bool" + Uint32 = "uint32" + Uint64 = "uint64" + + // runhcs + + VMShimOperation = "vmshim-op" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go new file mode 100644 index 00000000..e5b8b85e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go @@ -0,0 +1,24 @@ +package longpath + +import ( + "path/filepath" + "strings" +) + +// LongAbs makes a path absolute and returns it in NT long path form. +func LongAbs(path string) (string, error) { + if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { + return path, nil + } + if !filepath.IsAbs(path) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + path = absPath + } + if strings.HasPrefix(path, `\\`) { + return `\\?\UNC\` + path[2:], nil + } + return `\\?\` + path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go new file mode 100644 index 00000000..7e95efb3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go @@ -0,0 +1,52 @@ +package mergemaps + +import "encoding/json" + +// Merge recursively merges map `fromMap` into map `ToMap`. Any pre-existing values +// in ToMap are overwritten. Values in fromMap are added to ToMap. +// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang +func Merge(fromMap, ToMap interface{}) interface{} { + switch fromMap := fromMap.(type) { + case map[string]interface{}: + ToMap, ok := ToMap.(map[string]interface{}) + if !ok { + return fromMap + } + for keyToMap, valueToMap := range ToMap { + if valueFromMap, ok := fromMap[keyToMap]; ok { + fromMap[keyToMap] = Merge(valueFromMap, valueToMap) + } else { + fromMap[keyToMap] = valueToMap + } + } + case nil: + // merge(nil, map[string]interface{...}) -> map[string]interface{...} + ToMap, ok := ToMap.(map[string]interface{}) + if ok { + return ToMap + } + } + return fromMap +} + +// MergeJSON merges the contents of a JSON string into an object representation, +// returning a new object suitable for translating to JSON. +func MergeJSON(object interface{}, additionalJSON []byte) (interface{}, error) { + if len(additionalJSON) == 0 { + return object, nil + } + objectJSON, err := json.Marshal(object) + if err != nil { + return nil, err + } + var objectMap, newMap map[string]interface{} + err = json.Unmarshal(objectJSON, &objectMap) + if err != nil { + return nil, err + } + err = json.Unmarshal(additionalJSON, &newMap) + if err != nil { + return nil, err + } + return Merge(newMap, objectMap), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/export.go b/vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/export.go new file mode 100644 index 00000000..d8df1ef5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/export.go @@ -0,0 +1,79 @@ +// Package ociwclayer provides functions for importing and exporting Windows +// container layers from and to their OCI tar representation. 
+package ociwclayer + +import ( + "io" + "path/filepath" + + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" +) + +var driverInfo = hcsshim.DriverInfo{} + +// ExportLayer writes an OCI layer tar stream from the provided on-disk layer. +// The caller must specify the parent layers, if any, ordered from lowest to +// highest layer. +// +// The layer will be mounted for this process, so the caller should ensure that +// it is not currently mounted. +func ExportLayer(w io.Writer, path string, parentLayerPaths []string) error { + err := hcsshim.ActivateLayer(driverInfo, path) + if err != nil { + return err + } + defer hcsshim.DeactivateLayer(driverInfo, path) + + // Prepare and unprepare the layer to ensure that it has been initialized. + err = hcsshim.PrepareLayer(driverInfo, path, parentLayerPaths) + if err != nil { + return err + } + err = hcsshim.UnprepareLayer(driverInfo, path) + if err != nil { + return err + } + + r, err := hcsshim.NewLayerReader(driverInfo, path, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err != nil { + return err + } + return cerr +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), whiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/import.go b/vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/import.go new file mode 100644 index 00000000..de7a7202 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/import.go @@ -0,0 +1,141 @@ +package ociwclayer + +import ( + "bufio" + "io" + "os" + "path" + "path/filepath" + "strings" + + winio "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" +) + +const whiteoutPrefix = ".wh." + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } +) + +// ImportLayer reads a layer from an OCI layer tar stream and extracts it to the +// specified path. The caller must specify the parent layers, if any, ordered +// from lowest to highest layer. +// +// The caller must ensure that the thread or process has acquired backup and +// restore privileges. +// +// This function returns the total size of the layer's files, in bytes. 
+func ImportLayer(r io.Reader, path string, parentLayerPaths []string) (int64, error) { + err := os.MkdirAll(path, 0) + if err != nil { + return 0, err + } + w, err := hcsshim.NewLayerWriter(hcsshim.DriverInfo{}, path, parentLayerPaths) + if err != nil { + return 0, err + } + n, err := writeLayerFromTar(r, w, path) + cerr := w.Close() + if err != nil { + return 0, err + } + if cerr != nil { + return 0, cerr + } + return n, nil +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, whiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(whiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. +func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/ospath/join.go b/vendor/github.com/Microsoft/hcsshim/internal/ospath/join.go new file mode 100644 index 00000000..c0254607 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/ospath/join.go @@ -0,0 +1,14 @@ +package ospath + +import ( + "path" + "path/filepath" +) + +// Join joins paths using the target OS's path separator. +func Join(os string, elem ...string) string { + if os == "windows" { + return filepath.Join(elem...) + } + return path.Join(elem...) 
+} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go new file mode 100644 index 00000000..6c4a6415 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go @@ -0,0 +1,287 @@ +package regstate + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" + "reflect" + "syscall" + + "golang.org/x/sys/windows/registry" +) + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW + +const ( + _REG_OPTION_VOLATILE = 1 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 +) + +type Key struct { + registry.Key + Name string +} + +var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"} +var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} + +var rootPath = `SOFTWARE\Microsoft\runhcs` + +type NotFoundError struct { + Id string +} + +func (err *NotFoundError) Error() string { + return fmt.Sprintf("ID '%s' was not found", err.Id) +} + +func IsNotFoundError(err error) bool { + _, ok := err.(*NotFoundError) + return ok +} + +type NoStateError struct { + ID string + Key string +} + +func (err *NoStateError) Error() string { + return fmt.Sprintf("state '%s' is not present for ID '%s'", err.Key, err.ID) +} + +func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) { + var ( + h syscall.Handle + d uint32 + ) + fullpath := filepath.Join(k.Name, path) + err = regCreateKeyEx(syscall.Handle(k.Key), syscall.StringToUTF16Ptr(path), 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) + if err != nil { + return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err} + } + return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil +} + +func hive(perUser bool) *Key { + r := localMachine + if perUser { + r = localUser + } + return r +} + +func Open(root string, perUser bool) (*Key, error) { + k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS) + if err != nil { + return nil, err + } + defer k.Close() + + k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS) + if err != nil { + return nil, err + } + return k2, nil +} + +func RemoveAll(root string, perUser bool) error { + k, err := hive(perUser).open(rootPath) + if err != nil { + return err + } + defer k.Close() + r, err := k.open(url.PathEscape(root)) + if err != nil { + return err + } + defer r.Close() + ids, err := r.Enumerate() + if err != nil { + return err + } + for _, id := range ids { + err = r.Remove(id) + if err != nil { + return err + } + } + r.Close() + return k.Remove(root) +} + +func (k *Key) Close() error { + err := k.Key.Close() + k.Key = 0 + return err +} + +func (k *Key) Enumerate() ([]string, error) { + escapedIDs, err := k.ReadSubKeyNames(0) + if err != nil { + return nil, err + } + var ids []string + for _, e := range escapedIDs { + id, err := url.PathUnescape(e) + if err == nil { + ids = append(ids, id) + } + } + return ids, nil +} + +func (k *Key) open(name string) (*Key, error) { + fullpath := filepath.Join(k.Name, name) + nk, err := registry.OpenKey(k.Key, name, registry.ALL_ACCESS) + if err != nil { + return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, 
Err: err} + } + return &Key{nk, fullpath}, nil +} + +func (k *Key) openid(id string) (*Key, error) { + escaped := url.PathEscape(id) + fullpath := filepath.Join(k.Name, escaped) + nk, err := k.open(escaped) + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { + return nil, &NotFoundError{id} + } + if err != nil { + return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} + } + return nk, nil +} + +func (k *Key) Remove(id string) error { + escaped := url.PathEscape(id) + err := registry.DeleteKey(k.Key, escaped) + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NotFoundError{id} + } + return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err} + } + return nil +} + +func (k *Key) set(id string, create bool, key string, state interface{}) error { + var sk *Key + var err error + if create { + var existing bool + eid := url.PathEscape(id) + sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS) + if err != nil { + return err + } + defer sk.Close() + if existing { + sk.Close() + return fmt.Errorf("container %s already exists", id) + } + } else { + sk, err = k.openid(id) + if err != nil { + return err + } + defer sk.Close() + } + switch reflect.TypeOf(state).Kind() { + case reflect.Bool: + v := uint32(0) + if state.(bool) { + v = 1 + } + err = sk.SetDWordValue(key, v) + case reflect.Int: + err = sk.SetQWordValue(key, uint64(state.(int))) + case reflect.String: + err = sk.SetStringValue(key, state.(string)) + default: + var js []byte + js, err = json.Marshal(state) + if err != nil { + return err + } + err = sk.SetBinaryValue(key, js) + } + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err} + } + return nil +} + +func (k *Key) Create(id, key string, state interface{}) error { + return k.set(id, true, key, state) +} + +func (k *Key) Set(id, key string, state interface{}) error { + return k.set(id, false, key, state) +} + +func (k *Key) Clear(id, key string) error { + sk, err := k.openid(id) + if err != nil { + return err + } + defer sk.Close() + err = sk.DeleteValue(key) + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err} + } + return nil +} + +func (k *Key) Get(id, key string, state interface{}) error { + sk, err := k.openid(id) + if err != nil { + return err + } + defer sk.Close() + + var js []byte + switch reflect.TypeOf(state).Elem().Kind() { + case reflect.Bool: + var v uint64 + v, _, err = sk.GetIntegerValue(key) + if err == nil { + *state.(*bool) = v != 0 + } + case reflect.Int: + var v uint64 + v, _, err = sk.GetIntegerValue(key) + if err == nil { + *state.(*int) = int(v) + } + case reflect.String: + var v string + v, _, err = sk.GetStringValue(key) + if err == nil { + *state.(*string) = string(v) + } + default: + js, _, err = sk.GetBinaryValue(key) + } + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err} + } + if js != nil { + err = json.Unmarshal(js, state) + } + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate_test.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate_test.go new file mode 100644 index 00000000..7a449e27 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate_test.go @@ -0,0 +1,185 @@ +package regstate + +import ( + "os" + "testing" +) + +var testKey = "runhcs-test-test-key" + +func prepTest(t *testing.T) { + err := RemoveAll(testKey, true) + if err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } +} + +func TestLifetime(t *testing.T) { + prepTest(t) + k, err := Open(testKey, true) + if err != nil { + t.Fatal(err) + } + ids, err := k.Enumerate() + if err != nil { + t.Fatal(err) + } + if len(ids) != 0 { + t.Fatal("wrong count", len(ids)) + } + + id := "a/b/c" + key := "key" + err = k.Set(id, key, 1) + if err == nil { + t.Fatal("expected error") + } + + var i int + err = k.Get(id, key, &i) + if err == nil { + t.Fatal("expected error") + } + + err = k.Create(id, key, 2) + if err != nil { + t.Fatal(err) + } + + ids, err = k.Enumerate() + if err != nil { + t.Fatal(err) + } + if len(ids) != 1 { + t.Fatal("wrong count", len(ids)) + } + if ids[0] != id { + t.Fatal("wrong value", ids[0]) + } + + err = k.Get(id, key, &i) + if err != nil { + t.Fatal(err) + } + if i != 2 { + t.Fatal("got wrong value", i) + } + + err = k.Set(id, key, 3) + if err != nil { + t.Fatal(err) + } + err = k.Get(id, key, &i) + if err != nil { + t.Fatal(err) + } + if i != 3 { + t.Fatal("got wrong value", i) + } + + err = k.Remove(id) + if err != nil { + t.Fatal(err) + } + err = k.Remove(id) + if err == nil { + t.Fatal("expected error") + } + + ids, err = k.Enumerate() + if err != nil { + t.Fatal(err) + } + if len(ids) != 0 { + t.Fatal("wrong count", len(ids)) + } +} + +func TestBool(t *testing.T) { + prepTest(t) + k, err := Open(testKey, true) + if err != nil { + t.Fatal(err) + } + id := "x" + key := "y" + err = k.Create(id, key, true) + if err != nil { + t.Fatal(err) + } + b := false + err = k.Get(id, key, &b) + if err != nil { + t.Fatal(err) + } + if !b { + t.Fatal("value did not marshal correctly") + } +} + +func TestInt(t *testing.T) { + prepTest(t) + k, err := Open(testKey, true) + if err != nil { + t.Fatal(err) + } + id := "x" + key := "y" + err = k.Create(id, key, 10) + if err != nil { + t.Fatal(err) + } + v := 0 + err = k.Get(id, key, &v) + if err != nil { + t.Fatal(err) + } + if v != 10 { + t.Fatal("value did not marshal correctly") + } +} + +func TestString(t *testing.T) { + prepTest(t) + k, err := Open(testKey, true) + if err != nil { + t.Fatal(err) + } + id := "x" + key := "y" + err = k.Create(id, key, "blah") + if err != nil { + t.Fatal(err) + } + v := "" + err = k.Get(id, key, &v) + if err != nil { + t.Fatal(err) + } + if v != "blah" { + t.Fatal("value did not marshal correctly") + } +} + +func TestJson(t *testing.T) { + prepTest(t) + k, err := Open(testKey, true) + if err != nil { + t.Fatal(err) + } + id := "x" + key := "y" + v := struct{ X int }{5} + err = k.Create(id, key, &v) + if err != nil { + t.Fatal(err) + } + v.X = 0 + err = k.Get(id, key, &v) + if err != nil { + t.Fatal(err) + } + if v.X != 5 { + t.Fatal("value did not marshal correctly: ", v) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go new file mode 100644 index 00000000..4e349ad4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go @@ -0,0 +1,51 @@ +// Code generated by 'go generate'; DO NOT EDIT. 
+ +package regstate + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") +) + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/requesttype/types.go b/vendor/github.com/Microsoft/hcsshim/internal/requesttype/types.go new file mode 100644 index 00000000..8c0e1b45 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/requesttype/types.go @@ -0,0 +1,10 @@ +package requesttype + +// These are constants for v2 schema modify requests. + +// RequestType const +const ( + Add = "Add" + Remove = "Remove" + PreAdd = "PreAdd" // For networking +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go new file mode 100644 index 00000000..3015c364 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go @@ -0,0 +1,71 @@ +package runhcs + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/guid" +) + +// ContainerState represents the platform agnostic pieces relating to a +// running container's status and state +type ContainerState struct { + // Version is the OCI version for the container + Version string `json:"ociVersion"` + // ID is the container ID + ID string `json:"id"` + // InitProcessPid is the init process id in the parent namespace + InitProcessPid int `json:"pid"` + // Status is the current status of the container, running, paused, ... + Status string `json:"status"` + // Bundle is the path on the filesystem to the bundle + Bundle string `json:"bundle"` + // Rootfs is a path to a directory containing the container's root filesystem. + Rootfs string `json:"rootfs"` + // Created is the unix timestamp for the creation time of the container in UTC + Created time.Time `json:"created"` + // Annotations is the user defined annotations added to the config. + Annotations map[string]string `json:"annotations,omitempty"` + // The owner of the state directory (the owner of the container). + Owner string `json:"owner"` +} + +// GetErrorFromPipe returns reads from `pipe` and verifies if the operation +// returned success or error. If error converts that to an error and returns. 
If + `p` is not nil, it will issue a `Kill` and `Wait` for exit. +func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { + serr, err := ioutil.ReadAll(pipe) + if err != nil { + return err + } + + if bytes.Equal(serr, ShimSuccess) { + return nil + } + + extra := "" + if p != nil { + p.Kill() + state, err := p.Wait() + if err != nil { + panic(err) + } + extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode) + } + if len(serr) == 0 { + return fmt.Errorf("unknown shim failure%s", extra) + } + + return errors.New(string(serr)) +} + +// VMPipePath returns the named pipe path for the vm shim. +func VMPipePath(hostUniqueID guid.GUID) string { + return SafePipePath("runhcs-vm-" + hostUniqueID.String()) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go new file mode 100644 index 00000000..dcbb1903 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go @@ -0,0 +1,16 @@ +package runhcs + +import "net/url" + +const ( + SafePipePrefix = `\\.\pipe\ProtectedPrefix\Administrators\` +) + +// ShimSuccess is the byte stream returned on a successful operation. +var ShimSuccess = []byte{0, 'O', 'K', 0} + +func SafePipePath(name string) string { + // Use a pipe in the Administrators protected prefix to prevent malicious + // squatting. + return SafePipePrefix + url.PathEscape(name) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util_test.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util_test.go new file mode 100644 index 00000000..980d4be8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util_test.go @@ -0,0 +1,17 @@ +package runhcs + +import ( + "testing" +) + +func Test_SafePipePath(t *testing.T) { + tests := []string{"test", "test with spaces", "test/with\\\\.\\slashes", "test.with..dots..."} + expected := []string{"test", "test%20with%20spaces", "test%2Fwith%5C%5C.%5Cslashes", "test.with..dots..."} + for i, test := range tests { + actual := SafePipePath(test) + e := SafePipePrefix + expected[i] + if actual != e { + t.Fatalf("SafePipePath: actual '%s' != '%s'", actual, expected[i]) + } + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go new file mode 100644 index 00000000..2c8957b8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go @@ -0,0 +1,43 @@ +package runhcs + +import ( + "encoding/json" + + "github.com/Microsoft/go-winio" +) + +// VMRequestOp is an operation that can be issued to a VM shim. +type VMRequestOp string + +const ( + // OpCreateContainer is a create container request. + OpCreateContainer VMRequestOp = "create" + // OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM. + OpSyncNamespace VMRequestOp = "sync" + // OpUnmountContainer is a container unmount request. + OpUnmountContainer VMRequestOp = "unmount" + // OpUnmountContainerDiskOnly is a container unmount disk request. + OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk" +) + +// VMRequest is an operation request that is issued to a VM shim. +type VMRequest struct { + ID string + Op VMRequestOp +} + +// IssueVMRequest issues a request to a shim at the given pipe.
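+// A minimal usage sketch (illustrative only; the request ID and the `vmID`
+// GUID below are hypothetical, not values defined by this package):
+//
+//	req := &VMRequest{ID: "example-container", Op: OpCreateContainer}
+//	if err := IssueVMRequest(VMPipePath(vmID), req); err != nil {
+//		// the shim reported a failure; err carries its message
+//	}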
+func IssueVMRequest(pipepath string, req *VMRequest) error { + pipe, err := winio.DialPipe(pipepath, nil) + if err != nil { + return err + } + defer pipe.Close() + if err := json.NewEncoder(pipe).Encode(req); err != nil { + return err + } + if err := GetErrorFromPipe(pipe, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go new file mode 100644 index 00000000..f31edfaf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -0,0 +1,431 @@ +package safefile + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + "syscall" + "unicode/utf16" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/longpath" + + winio "github.com/Microsoft/go-winio" +) + +//go:generate go run $GOROOT\src\syscall\mksyscall_windows.go -output zsyscall_windows.go safeopen.go + +//sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile +//sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile +//sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys localAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc +//sys localFree(ptr uintptr) = kernel32.LocalFree + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName uintptr + Attributes uintptr + SecurityDescriptor uintptr + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type fileLinkInformation struct { + ReplaceIfExists bool + RootDirectory uintptr + FileNameLength uint32 + FileName [1]uint16 +} + +type fileDispositionInformationEx struct { + Flags uintptr +} + +const ( + _FileLinkInformation = 11 + _FileDispositionInformationEx = 64 + + FILE_READ_ATTRIBUTES = 0x0080 + FILE_WRITE_ATTRIBUTES = 0x0100 + DELETE = 0x10000 + + FILE_OPEN = 1 + FILE_CREATE = 2 + + FILE_DIRECTORY_FILE = 0x00000001 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_DELETE_ON_CLOSE = 0x00001000 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + + FILE_DISPOSITION_DELETE = 0x00000001 + + _OBJ_DONT_REPARSE = 0x1000 + + _STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B +) + +func OpenRoot(path string) (*os.File, error) { + longpath, err := longpath.LongAbs(path) + if err != nil { + return nil, err + } + return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) +} + +func ntRelativePath(path string) ([]uint16, error) { + path = filepath.Clean(path) + if strings.Contains(path, ":") { + // Since alternate data streams must follow the file they + // are attached to, finding one here (out of order) is invalid. 
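+ // (On NTFS an alternate data stream is addressed as "name:stream", so a ':'
+ // inside a relative path would name a stream rather than a path component.)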
+ return nil, errors.New("path contains invalid character `:`") + } + fspath := filepath.FromSlash(path) + if len(fspath) > 0 && fspath[0] == '\\' { + return nil, errors.New("expected relative path") + } + + path16 := utf16.Encode(([]rune)(fspath)) + if len(path16) > 32767 { + return nil, syscall.ENAMETOOLONG + } + + return path16, nil +} + +// openRelativeInternal opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. +func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + var ( + h uintptr + iosb ioStatusBlock + oa objectAttributes + ) + + path16, err := ntRelativePath(path) + if err != nil { + return nil, err + } + + if root == nil || root.Fd() == 0 { + return nil, errors.New("missing root directory") + } + + upathBuffer := localAlloc(0, int(unsafe.Sizeof(unicodeString{}))+len(path16)*2) + defer localFree(upathBuffer) + + upath := (*unicodeString)(unsafe.Pointer(upathBuffer)) + upath.Length = uint16(len(path16) * 2) + upath.MaximumLength = upath.Length + upath.Buffer = upathBuffer + unsafe.Sizeof(*upath) + copy((*[32768]uint16)(unsafe.Pointer(upath.Buffer))[:], path16) + + oa.Length = unsafe.Sizeof(oa) + oa.ObjectName = upathBuffer + oa.RootDirectory = uintptr(root.Fd()) + oa.Attributes = _OBJ_DONT_REPARSE + status := ntCreateFile( + &h, + accessMask|syscall.SYNCHRONIZE, + &oa, + &iosb, + nil, + 0, + shareFlags, + createDisposition, + FILE_OPEN_FOR_BACKUP_INTENT|FILE_SYNCHRONOUS_IO_NONALERT|flags, + nil, + 0, + ) + if status != 0 { + return nil, rtlNtStatusToDosError(status) + } + + fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) + if err != nil { + syscall.Close(syscall.Handle(h)) + return nil, err + } + + return os.NewFile(h, fullPath), nil +} + +// OpenRelative opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. +func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) + if err != nil { + err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} + } + return f, err +} + +// LinkRelative creates a hard link from oldname to newname (relative to oldroot +// and newroot), failing if any of the intermediate path components are reparse +// points. +func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { + // Open the old file. + oldf, err := openRelativeInternal( + oldname, + oldroot, + syscall.FILE_WRITE_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + FILE_OPEN, + 0, + ) + if err != nil { + return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer oldf.Close() + + // Open the parent of the new file. + var parent *os.File + parentPath := filepath.Dir(newname) + if parentPath != "." 
{ + parent, err = openRelativeInternal( + parentPath, + newroot, + syscall.GENERIC_READ, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + FILE_OPEN, + FILE_DIRECTORY_FILE) + if err != nil { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer parent.Close() + + fi, err := winio.GetFileBasicInfo(parent) + if err != nil { + return err + } + if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: rtlNtStatusToDosError(_STATUS_REPARSE_POINT_ENCOUNTERED)} + } + + } else { + parent = newroot + } + + // Issue an NT call to create the link. This will be safe because NT will + // not open any more directories to create the link, so it cannot walk any + // more reparse points. + newbase := filepath.Base(newname) + newbase16, err := ntRelativePath(newbase) + if err != nil { + return err + } + + size := int(unsafe.Offsetof(fileLinkInformation{}.FileName)) + len(newbase16)*2 + linkinfoBuffer := localAlloc(0, size) + defer localFree(linkinfoBuffer) + linkinfo := (*fileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) + linkinfo.RootDirectory = parent.Fd() + linkinfo.FileNameLength = uint32(len(newbase16) * 2) + copy((*[32768]uint16)(unsafe.Pointer(&linkinfo.FileName[0]))[:], newbase16) + + var iosb ioStatusBlock + status := ntSetInformationFile( + oldf.Fd(), + &iosb, + linkinfoBuffer, + uint32(size), + _FileLinkInformation, + ) + if status != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: rtlNtStatusToDosError(status)} + } + + return nil +} + +// deleteOnClose marks a file to be deleted when the handle is closed. +func deleteOnClose(f *os.File) error { + disposition := fileDispositionInformationEx{Flags: FILE_DISPOSITION_DELETE} + var iosb ioStatusBlock + status := ntSetInformationFile( + f.Fd(), + &iosb, + uintptr(unsafe.Pointer(&disposition)), + uint32(unsafe.Sizeof(disposition)), + _FileDispositionInformationEx, + ) + if status != 0 { + return rtlNtStatusToDosError(status) + } + return nil +} + +// clearReadOnly clears the readonly attribute on a file. +func clearReadOnly(f *os.File) error { + bi, err := winio.GetFileBasicInfo(f) + if err != nil { + return err + } + if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { + return nil + } + sbi := winio.FileBasicInfo{ + FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, + } + if sbi.FileAttributes == 0 { + sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL + } + return winio.SetFileBasicInfo(f, &sbi) +} + +// RemoveRelative removes a file or directory relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + FILE_READ_ATTRIBUTES|FILE_WRITE_ATTRIBUTES|DELETE, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + FILE_OPEN, + FILE_OPEN_REPARSE_POINT) + if err == nil { + defer f.Close() + err = deleteOnClose(f) + if err == syscall.ERROR_ACCESS_DENIED { + // Maybe the file is marked readonly. Clear the bit and retry. 
+ clearReadOnly(f) + err = deleteOnClose(f) + } + } + if err != nil { + return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} + } + return nil +} + +// RemoveAllRelative removes a directory tree relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveAllRelative(path string, root *os.File) error { + fi, err := LstatRelative(path, root) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + // If this is a reparse point, it can't have children. Simple remove will do. + err := RemoveRelative(path, root) + if err == nil || os.IsNotExist(err) { + return nil + } + return err + } + + // It is necessary to use os.Open as Readdirnames does not work with + // OpenRelative. This is safe because the above lstatrelative fails + // if the target is outside the root, and we know this is not a + // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. + fd, err := os.Open(filepath.Join(root.Name(), path)) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + return err + } + + // Remove contents & return first error. + for { + names, err1 := fd.Readdirnames(100) + for _, name := range names { + err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root) + if err == nil { + err = err1 + } + } + if err1 == io.EOF { + break + } + // If Readdirnames returned an error, use it. + if err == nil { + err = err1 + } + if len(names) == 0 { + break + } + } + fd.Close() + + // Remove directory. + err1 := RemoveRelative(path, root) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + if err == nil { + err = err1 + } + return err +} + +// MkdirRelative creates a directory relative to a root, failing if any +// intermediate path components are reparse points. +func MkdirRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + FILE_CREATE, + FILE_DIRECTORY_FILE) + if err == nil { + f.Close() + } else { + err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} + } + return err +} + +// LstatRelative performs a stat operation on a file relative to a root, failing +// if any intermediate path components are reparse points. +func LstatRelative(path string, root *os.File) (os.FileInfo, error) { + f, err := openRelativeInternal( + path, + root, + FILE_READ_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + FILE_OPEN, + FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} + } + defer f.Close() + return f.Stat() +} + +// EnsureNotReparsePointRelative validates that a given file (relative to a +// root) and all intermediate path components are not a reparse points. +func EnsureNotReparsePointRelative(path string, root *os.File) error { + // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. 
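+ // With OBJ_DONT_REPARSE set, NT fails the open if the target or any
+ // intermediate component is a reparse point, so a successful open (the
+ // handle is closed immediately below) is itself the validation.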
+ f, err := OpenRelative( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + FILE_OPEN, + 0) + if err != nil { + return err + } + f.Close() + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_admin_test.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_admin_test.go new file mode 100644 index 00000000..08969178 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_admin_test.go @@ -0,0 +1,125 @@ +// +build admin + +package safefile + +import ( + "os" + "path/filepath" + "syscall" + "testing" +) + +func TestOpenRelative(t *testing.T) { + badroot, err := tempRoot() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(badroot.Name()) + defer badroot.Close() + + root, err := tempRoot() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root.Name()) + defer root.Close() + + // Create a file + f, err := OpenRelative("foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0) + if err != nil { + t.Fatal(err) + } + f.Close() + + // Create a directory + err = MkdirRelative("dir", root) + if err != nil { + t.Fatal(err) + } + + // Create a file in the bad root + f, err = os.Create(filepath.Join(badroot.Name(), "badfile")) + if err != nil { + t.Fatal(err) + } + f.Close() + + // Create a directory symlink to the bad root + err = os.Symlink(badroot.Name(), filepath.Join(root.Name(), "dsymlink")) + if err != nil { + t.Fatal(err) + } + + // Create a file symlink to the bad file + err = os.Symlink(filepath.Join(badroot.Name(), "badfile"), filepath.Join(root.Name(), "symlink")) + if err != nil { + t.Fatal(err) + } + + // Make sure opens cannot happen through the symlink + f, err = OpenRelative("dsymlink/foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0) + if err == nil { + f.Close() + t.Fatal("created file in wrong tree!") + } + t.Log(err) + + // Check again using EnsureNotReparsePointRelative + err = EnsureNotReparsePointRelative("dsymlink", root) + if err == nil { + t.Fatal("reparse check should have failed") + } + t.Log(err) + + // Make sure links work + err = LinkRelative("foo", root, "hardlink", root) + if err != nil { + t.Fatal(err) + } + + // Even inside directories + err = LinkRelative("foo", root, "dir/bar", root) + if err != nil { + t.Fatal(err) + } + + // Make sure links cannot happen through the symlink + err = LinkRelative("foo", root, "dsymlink/hardlink", root) + if err == nil { + f.Close() + t.Fatal("created link in wrong tree!") + } + t.Log(err) + + // In either direction + err = LinkRelative("dsymlink/badfile", root, "bar", root) + if err == nil { + f.Close() + t.Fatal("created link in wrong tree!") + } + t.Log(err) + + // Make sure remove cannot happen through the symlink + err = RemoveRelative("symlink/badfile", root) + if err == nil { + t.Fatal("remove in wrong tree!") + } + + // Remove the symlink + err = RemoveAllRelative("symlink", root) + if err != nil { + t.Fatal(err) + } + + // Make sure it's not possible to escape with .. (NT doesn't support .. 
at the kernel level) + f, err = OpenRelative("..", root, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, FILE_OPEN, 0) + if err == nil { + t.Fatal("escaped the directory") + } + t.Log(err) + + // Should not have touched the other directory + if _, err = os.Lstat(filepath.Join(badroot.Name(), "badfile")); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_test.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_test.go new file mode 100644 index 00000000..faae6769 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_test.go @@ -0,0 +1,53 @@ +package safefile + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + + winio "github.com/Microsoft/go-winio" +) + +func tempRoot() (*os.File, error) { + name, err := ioutil.TempDir("", "hcsshim-test") + if err != nil { + return nil, err + } + f, err := OpenRoot(name) + if err != nil { + os.Remove(name) + return nil, err + } + return f, nil +} + +func TestRemoveRelativeReadOnly(t *testing.T) { + root, err := tempRoot() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root.Name()) + defer root.Close() + + p := filepath.Join(root.Name(), "foo") + f, err := os.Create(p) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + bi := winio.FileBasicInfo{} + bi.FileAttributes = syscall.FILE_ATTRIBUTE_READONLY + err = winio.SetFileBasicInfo(f, &bi) + if err != nil { + t.Fatal(err) + } + f.Close() + + err = RemoveRelative("foo", root) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go new file mode 100644 index 00000000..709b9d34 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/zsyscall_windows.go @@ -0,0 +1,79 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package safefile + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") +) + +func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func rtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func localAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func localFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go new file mode 100644 index 00000000..995433ac --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema1/schema1.go @@ -0,0 +1,245 @@ +package schema1 + +import ( + "encoding/json" + "time" + + "github.com/Microsoft/hcsshim/internal/schema2" +) + +// ProcessConfig is used as both the input of Container.CreateProcess +// and to convert the parameters to JSON for passing onto the HCS +type ProcessConfig struct { + ApplicationName string `json:",omitempty"` + CommandLine string `json:",omitempty"` + CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows + User string `json:",omitempty"` + WorkingDirectory string `json:",omitempty"` + Environment map[string]string `json:",omitempty"` + EmulateConsole bool `json:",omitempty"` + CreateStdInPipe bool `json:",omitempty"` + CreateStdOutPipe bool `json:",omitempty"` + CreateStdErrPipe bool `json:",omitempty"` + ConsoleSize [2]uint `json:",omitempty"` + CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows + OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows +} + +type Layer struct { + ID string + Path string +} + +type MappedDir struct { + HostPath string + ContainerPath string + ReadOnly bool + BandwidthMaximum uint64 + IOPSMaximum uint64 + CreateInUtilityVM bool + // LinuxMetadata - Support added in 1803/RS4+. 
+ LinuxMetadata bool `json:",omitempty"` +} + +type MappedPipe struct { + HostPath string + ContainerPipeName string +} + +type HvRuntime struct { + ImagePath string `json:",omitempty"` + SkipTemplate bool `json:",omitempty"` + LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM + LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM + LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode + BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD + WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD +} + +type MappedVirtualDisk struct { + HostPath string `json:",omitempty"` // Path to VHD on the host + ContainerPath string // Platform-specific mount point path in the container + CreateInUtilityVM bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" + AttachOnly bool `json:",omitempty:` +} + +// AssignedDevice represents a device that has been directly assigned to a container +// +// NOTE: Support added in RS5 +type AssignedDevice struct { + // InterfaceClassGUID of the device to assign to container. + InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"` +} + +// ContainerConfig is used as both the input of CreateContainer +// and to convert the parameters to JSON for passing onto the HCS +type ContainerConfig struct { + SystemType string // HCS requires this to be hard-coded to "Container" + Name string // Name of the container. We use the docker ID. + Owner string `json:",omitempty"` // The management platform that created this container + VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID} + IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows + LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID + Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID + Credentials string `json:",omitempty"` // Credentials information + ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container. + ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares. + ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit. 
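+ // For example, a ProcessorMaximum of 5000 would cap the container at 50% of the host's processor cycles (an illustrative value, not a default).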
+ StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS + StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second + StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller + MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes + HostName string `json:",omitempty"` // Hostname + MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts) + MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes + HvPartition bool // True if it is a Hyper-V Container + NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with. + EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container + HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM + Servicing bool `json:",omitempty"` // True if this container is for servicing + AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution + DNSSearchList string `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution + ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. + TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed + MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start + AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign. NOTE: Support added in RS5 +} + +type ComputeSystemQuery struct { + IDs []string `json:"Ids,omitempty"` + Types []string `json:",omitempty"` + Names []string `json:",omitempty"` + Owners []string `json:",omitempty"` +} + +type PropertyType string + +const ( + PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 + PropertyTypeProcessList = "ProcessList" // V1 and V2 + PropertyTypeMappedVirtualDisk = "MappedVirtualDisk" // Not supported in V2 schema call + PropertyTypeGuestConnection = "GuestConnection" // V1 and V2.
Nil return from HCS before RS5 +) + +type PropertyQuery struct { + PropertyTypes []PropertyType `json:",omitempty"` +} + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties struct { + ID string `json:"Id"` + State string + Name string + SystemType string + Owner string + SiloGUID string `json:"SiloGuid,omitempty"` + RuntimeID string `json:"RuntimeId,omitempty"` + IsRuntimeTemplate bool `json:",omitempty"` + RuntimeImagePath string `json:",omitempty"` + Stopped bool `json:",omitempty"` + ExitType string `json:",omitempty"` + AreUpdatesPending bool `json:",omitempty"` + ObRoot string `json:",omitempty"` + Statistics Statistics `json:",omitempty"` + ProcessList []ProcessListItem `json:",omitempty"` + MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` + GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` +} + +// MemoryStats holds the memory statistics for a container +type MemoryStats struct { + UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} + +// ProcessorStats holds the processor statistics for a container +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:",omitempty"` + RuntimeUser100ns uint64 `json:",omitempty"` + RuntimeKernel100ns uint64 `json:",omitempty"` +} + +// StorageStats holds the storage statistics for a container +type StorageStats struct { + ReadCountNormalized uint64 `json:",omitempty"` + ReadSizeBytes uint64 `json:",omitempty"` + WriteCountNormalized uint64 `json:",omitempty"` + WriteSizeBytes uint64 `json:",omitempty"` +} + +// NetworkStats holds the network statistics for a container +type NetworkStats struct { + BytesReceived uint64 `json:",omitempty"` + BytesSent uint64 `json:",omitempty"` + PacketsReceived uint64 `json:",omitempty"` + PacketsSent uint64 `json:",omitempty"` + DroppedPacketsIncoming uint64 `json:",omitempty"` + DroppedPacketsOutgoing uint64 `json:",omitempty"` + EndpointId string `json:",omitempty"` + InstanceId string `json:",omitempty"` +} + +// Statistics is the structure returned by a statistics call on a container +type Statistics struct { + Timestamp time.Time `json:",omitempty"` + ContainerStartTime time.Time `json:",omitempty"` + Uptime100ns uint64 `json:",omitempty"` + Memory MemoryStats `json:",omitempty"` + Processor ProcessorStats `json:",omitempty"` + Storage StorageStats `json:",omitempty"` + Network []NetworkStats `json:",omitempty"` +} + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem struct { + CreateTimestamp time.Time `json:",omitempty"` + ImageName string `json:",omitempty"` + KernelTime100ns uint64 `json:",omitempty"` + MemoryCommitBytes uint64 `json:",omitempty"` + MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"` + MemoryWorkingSetSharedBytes uint64 `json:",omitempty"` + ProcessId uint32 `json:",omitempty"` + UserTime100ns uint64 `json:",omitempty"` +} + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController struct { + MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"` +} + +// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM +type GuestDefinedCapabilities struct { + 
NamespaceAddRequestSupported bool `json:",omitempty"` + SignalProcessSupported bool `json:",omitempty"` +} + +// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM +type GuestConnectionInfo struct { + SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` + ProtocolVersion uint32 `json:",omitempty"` + GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` +} + +// Type of Request Support in ModifySystem +type RequestType string + +// Type of Resource Support in ModifySystem +type ResourceType string + +// RequestType const +const ( + Add RequestType = "Add" + Remove RequestType = "Remove" + Network ResourceType = "Network" +) + +// ResourceModificationRequestResponse is the structure used to send a request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type ResourceModificationRequestResponse struct { + Resource ResourceType `json:"ResourceType"` + Data interface{} `json:"Settings"` + Request RequestType `json:"RequestType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go new file mode 100644 index 00000000..09456cbc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/attachment.go @@ -0,0 +1,31 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Attachment struct { + + Type_ string `json:"Type,omitempty"` + + Path string `json:"Path,omitempty"` + + IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"` + + CachingMode string `json:"CachingMode,omitempty"` + + NoWriteHardening bool `json:"NoWriteHardening,omitempty"` + + DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"` + + IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"` + + CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go new file mode 100644 index 00000000..ecbbed4c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/battery.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Battery struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go new file mode 100644 index 00000000..243779ea --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/cache_query_stats_response.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CacheQueryStatsResponse struct { + + L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` + + L3TotalBwBytes int32
`json:"L3TotalBwBytes,omitempty"` + + L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go new file mode 100644 index 00000000..ca75277a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/chipset.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Chipset struct { + Uefi *Uefi `json:"Uefi,omitempty"` + + IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` + + BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` + + ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` + + ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` + + UseUtc bool `json:"UseUtc,omitempty"` + + // LinuxKernelDirect - Added in v2.2 Builds >=181117 + LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go new file mode 100644 index 00000000..88f01707 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/close_handle.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CloseHandle struct { + + Handle string `json:"Handle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go new file mode 100644 index 00000000..c665be3d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/com_port.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port. 
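+// For example (illustrative values only; the map key and pipe name are not
+// taken from this schema), a utility VM serial port could be wired to a host
+// named pipe via:
+//
+//	Devices.ComPorts = map[string]ComPort{"0": {NamedPipe: `\\.\pipe\examplecom1`}}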
+type ComPort struct { + + NamedPipe string `json:"NamedPipe,omitempty"` + + OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go new file mode 100644 index 00000000..85785d28 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/compute_system.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ComputeSystem struct { + + Owner string `json:"Owner,omitempty"` + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + HostedSystem *HostedSystem `json:"HostedSystem,omitempty"` + + Container *Container `json:"Container,omitempty"` + + VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` + + ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go new file mode 100644 index 00000000..1a47db7d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/configuration.go @@ -0,0 +1,72 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "net/http" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are string, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextOAuth2 takes a oauth2.TokenSource as authentication for the request. + ContextOAuth2 = contextKey("token") + + // ContextBasicAuth takes BasicAuth as authentication for the request. + ContextBasicAuth = contextKey("basic") + + // ContextAccessToken takes a string oauth2 access token as authentication for the request. 
+ ContextAccessToken = contextKey("accesstoken") + + // ContextAPIKey takes an APIKey as authentication for the request + ContextAPIKey = contextKey("apikey") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +type Configuration struct { + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + HTTPClient *http.Client +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "https://localhost", + DefaultHeader: make(map[string]string), + UserAgent: "Swagger-Codegen/2.1.0/go", + } + return cfg +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go new file mode 100644 index 00000000..adbe07fe --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/console_size.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ConsoleSize struct { + + Height int32 `json:"Height,omitempty"` + + Width int32 `json:"Width,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go new file mode 100644 index 00000000..17dce28b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container.go @@ -0,0 +1,35 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Container struct { + + GuestOs *GuestOs `json:"GuestOs,omitempty"` + + Storage *Storage `json:"Storage,omitempty"` + + MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` + + MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` + + Memory *Memory `json:"Memory,omitempty"` + + Processor *Processor `json:"Processor,omitempty"` + + Networking *Networking `json:"Networking,omitempty"` + + HvSocket *HvSocket `json:"HvSocket,omitempty"` + + ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + AssignedDevices []Device `json:"AssignedDevices,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go new file mode 100644 index 00000000..0f8f6443 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_state.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided 
(generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardState struct { + + // Authentication cookie for calls to a Container Credential Guard instance. + Cookie string `json:"Cookie,omitempty"` + + // Name of the RPC endpoint of the Container Credential Guard instance. + RpcEndpoint string `json:"RpcEndpoint,omitempty"` + + // Transport used for the configured Container Credential Guard instance. + Transport string `json:"Transport,omitempty"` + + // Credential spec used for the configured Container Credential Guard instance. + CredentialSpec string `json:"CredentialSpec,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go new file mode 100644 index 00000000..754797e2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/container_memory_information.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// memory usage as viewed from within the container +type ContainerMemoryInformation struct { + + TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` + + TotalUsage int32 `json:"TotalUsage,omitempty"` + + CommittedBytes int32 `json:"CommittedBytes,omitempty"` + + SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` + + CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` + + PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go new file mode 100644 index 00000000..ca319bbb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Device struct { + + // The interface class guid of the device to assign to container. 
+ InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go new file mode 100644 index 00000000..b2191c57 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/devices.go @@ -0,0 +1,43 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Devices struct { + + ComPorts map[string]ComPort `json:"ComPorts,omitempty"` + + Scsi map[string]Scsi `json:"Scsi,omitempty"` + + VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` + + NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` + + VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` + + Keyboard *Keyboard `json:"Keyboard,omitempty"` + + Mouse *Mouse `json:"Mouse,omitempty"` + + HvSocket *HvSocket2 `json:"HvSocket,omitempty"` + + EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` + + GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` + + VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` + + Plan9 *Plan9 `json:"Plan9,omitempty"` + + Battery *Battery `json:"Battery,omitempty"` + + FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` + + SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go new file mode 100644 index 00000000..4fe592f7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/enhanced_mode_video.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type EnhancedModeVideo struct { + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go new file mode 100644 index 00000000..51011afe --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/flexible_io_device.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type FlexibleIoDevice struct { + + EmulatorId string `json:"EmulatorId,omitempty"` + + HostingModel string `json:"HostingModel,omitempty"` + + Configuration []string `json:"Configuration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go new file mode 100644 index 00000000..7db29495 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * 
Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestConnection struct { + + // Use Vsock rather than Hyper-V sockets to communicate with the guest service. + UseVsock bool `json:"UseVsock,omitempty"` + + // Don't disconnect the guest connection when pausing the virtual machine. + UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go new file mode 100644 index 00000000..8a369bab --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_connection_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Information about the guest. +type GuestConnectionInfo struct { + + // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. + SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` + + ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` + + GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go new file mode 100644 index 00000000..c5fa7673 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_crash_reporting.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestCrashReporting struct { + + WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go new file mode 100644 index 00000000..c708fc7c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_os.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestOs struct { + + HostName string `json:"HostName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go new file mode 100644 index 00000000..ef1eec88 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/guest_state.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestState struct { + + // The path to an existing file uses for persistent guest state storage. 
An empty string indicates the system should initialize new transient, in-memory guest state. + GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` + + // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. + RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` + + // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. + ForceTransientState bool `json:"ForceTransientState,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go new file mode 100644 index 00000000..0797584c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hosted_system.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HostedSystem struct { + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + Container *Container `json:"Container,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go new file mode 100644 index 00000000..ef9ffb8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocket struct { + + Config *HvSocketSystemConfig `json:"Config,omitempty"` + + EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go new file mode 100644 index 00000000..a19ba15c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_2.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// HvSocket configuration for a VM +type HvSocket2 struct { + + HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go new file mode 100644 index 00000000..a848e91e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_service_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocketServiceConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. 
If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` + + // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors + AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go new file mode 100644 index 00000000..69f4f9d3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/hv_socket_system_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. +type HvSocketSystemConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). + DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. 
+ DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` + + ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go new file mode 100644 index 00000000..3d3fa3b1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/keyboard.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Keyboard struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go new file mode 100644 index 00000000..b63b8ef1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/layer.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Layer struct { + + Id string `json:"Id,omitempty"` + + Path string `json:"Path,omitempty"` + + PathType string `json:"PathType,omitempty"` + + // Unspecified defaults to Enabled + Cache string `json:"Cache,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go new file mode 100644 index 00000000..0ab6c280 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/linux_kernel_direct.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.2 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LinuxKernelDirect struct { + KernelFilePath string `json:"KernelFilePath,omitempty"` + + InitRdPath string `json:"InitRdPath,omitempty"` + + KernelCmdLine string `json:"KernelCmdLine,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go new file mode 100644 index 00000000..a823a6d3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_directory.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedDirectory struct { + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` + + ContainerPath string `json:"ContainerPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go new file mode 100644 index 00000000..2d1d2604 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mapped_pipe.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen 
https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedPipe struct { + + ContainerPipeName string `json:"ContainerPipeName,omitempty"` + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go new file mode 100644 index 00000000..e1d135a3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory struct { + + SizeInMB int32 `json:"SizeInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go new file mode 100644 index 00000000..27d0b8c4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_2.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory2 struct { + SizeInMB int32 `json:"SizeInMB,omitempty"` + + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + + EnableHotHint bool `json:"EnableHotHint,omitempty"` + + EnableColdHint bool `json:"EnableColdHint,omitempty"` + + EnableEpf bool `json:"EnableEpf,omitempty"` + + // EnableDeferredCommit is private in the schema. If regenerated need to add back. 
+ EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go new file mode 100644 index 00000000..bdd87dff --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_information_for_vm.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MemoryInformationForVm struct { + + VirtualNodeCount int32 `json:"VirtualNodeCount,omitempty"` + + VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` + + VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go new file mode 100644 index 00000000..6214970f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/memory_stats.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Memory runtime statistics +type MemoryStats struct { + + MemoryUsageCommitBytes int32 `json:"MemoryUsageCommitBytes,omitempty"` + + MemoryUsageCommitPeakBytes int32 `json:"MemoryUsageCommitPeakBytes,omitempty"` + + MemoryUsagePrivateWorkingSetBytes int32 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go new file mode 100644 index 00000000..d29455a3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/modify_setting_request.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModifySettingRequest struct { + ResourcePath string `json:"ResourcePath,omitempty"` + + RequestType string `json:"RequestType,omitempty"` + + Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated + + GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. 
Locally updated +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go new file mode 100644 index 00000000..ccf8b938 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/mouse.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Mouse struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go new file mode 100644 index 00000000..c586f66c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/network_adapter.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NetworkAdapter struct { + + EndpointId string `json:"EndpointId,omitempty"` + + MacAddress string `json:"MacAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go new file mode 100644 index 00000000..12c47827 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/networking.go @@ -0,0 +1,24 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Networking struct { + + AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` + + DnsSearchList string `json:"DnsSearchList,omitempty"` + + NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` + + // Guid in windows; string in linux + Namespace string `json:"Namespace,omitempty"` + + NetworkAdapters []string `json:"NetworkAdapters,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go new file mode 100644 index 00000000..1cd70d17 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_notification.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Notification data that is indicated to components running in the Virtual Machine. 
+type PauseNotification struct { + + Reason string `json:"Reason,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go new file mode 100644 index 00000000..780a5cae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/pause_options.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Options for HcsPauseComputeSystem +type PauseOptions struct { + + SuspensionLevel string `json:"SuspensionLevel,omitempty"` + + HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go new file mode 100644 index 00000000..705c677e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9 struct { + + Shares []Plan9Share `json:"Shares,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go new file mode 100644 index 00000000..eb171817 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/plan9_share.go @@ -0,0 +1,33 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9Share struct { + + Name string `json:"Name,omitempty"` + + // The name by which the guest operation system can access this share, via the aname parameter in the Plan9 protocol. + AccessName string `json:"AccessName,omitempty"` + + Path string `json:"Path,omitempty"` + + Port int32 `json:"Port,omitempty"` + + // Flags are marked private. 
Until they are exported correctly + // + // ReadOnly 0x00000001 + // LinuxMetadata 0x00000004 + // CaseSensitive 0x00000008 + Flags int32 `json:"Flags,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go new file mode 100644 index 00000000..63e0b7f8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_details.go @@ -0,0 +1,34 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Information about a process running in a container +type ProcessDetails struct { + + ProcessId int32 `json:"ProcessId,omitempty"` + + ImageName string `json:"ImageName,omitempty"` + + CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` + + UserTime100ns int32 `json:"UserTime100ns,omitempty"` + + KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` + + MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` + + MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` + + MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go new file mode 100644 index 00000000..29bc2e3d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_modify_request.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Passed to HcsRpc_ModifyProcess +type ProcessModifyRequest struct { + + Operation string `json:"Operation,omitempty"` + + ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` + + CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go new file mode 100644 index 00000000..470c5573 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_parameters.go @@ -0,0 +1,47 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessParameters struct { + + ApplicationName string `json:"ApplicationName,omitempty"` + + CommandLine string `json:"CommandLine,omitempty"` + + // optional alternative to CommandLine, currently only supported by Linux GCS + CommandArgs []string `json:"CommandArgs,omitempty"` + + User string `json:"User,omitempty"` + + WorkingDirectory string `json:"WorkingDirectory,omitempty"` + + Environment map[string]string `json:"Environment,omitempty"` + + // if set, will run as low-privilege process + RestrictedToken bool `json:"RestrictedToken,omitempty"` + + // if set, ignore StdErrPipe + EmulateConsole bool 
`json:"EmulateConsole,omitempty"` + + CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` + + CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` + + CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` + + // height then width + ConsoleSize []int32 `json:"ConsoleSize,omitempty"` + + // if set, find an existing session for the user and create the process in it + UseExistingLogin bool `json:"UseExistingLogin,omitempty"` + + // if set, use the legacy console instead of conhost + UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go new file mode 100644 index 00000000..20793d15 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/process_status.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Status of a process running in a container +type ProcessStatus struct { + + ProcessId int32 `json:"ProcessId,omitempty"` + + Exited bool `json:"Exited,omitempty"` + + ExitCode int32 `json:"ExitCode,omitempty"` + + LastWaitResult int32 `json:"LastWaitResult,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go new file mode 100644 index 00000000..7a60b024 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor struct { + + Count int32 `json:"Count,omitempty"` + + Maximum int32 `json:"Maximum,omitempty"` + + Weight int32 `json:"Weight,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go new file mode 100644 index 00000000..40d3e735 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_2.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor2 struct { + + Count int32 `json:"Count,omitempty"` + + Limit int32 `json:"Limit,omitempty"` + + Weight int32 `json:"Weight,omitempty"` + + ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go new file mode 100644 index 00000000..9d3b77e5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/processor_stats.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU runtime statistics +type 
ProcessorStats struct { + + TotalRuntime100ns int32 `json:"TotalRuntime100ns,omitempty"` + + RuntimeUser100ns int32 `json:"RuntimeUser100ns,omitempty"` + + RuntimeKernel100ns int32 `json:"RuntimeKernel100ns,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go new file mode 100644 index 00000000..6db2a48f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/properties.go @@ -0,0 +1,47 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Properties struct { + + Id string `json:"Id,omitempty"` + + SystemType string `json:"SystemType,omitempty"` + + RuntimeOsType string `json:"RuntimeOsType,omitempty"` + + Name string `json:"Name,omitempty"` + + Owner string `json:"Owner,omitempty"` + + RuntimeId string `json:"RuntimeId,omitempty"` + + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` + + State string `json:"State,omitempty"` + + Stopped bool `json:"Stopped,omitempty"` + + ExitType string `json:"ExitType,omitempty"` + + Memory *MemoryInformationForVm `json:"Memory,omitempty"` + + Statistics *Statistics `json:"Statistics,omitempty"` + + ProcessList []ProcessDetails `json:"ProcessList,omitempty"` + + TerminateOnLastHandleClosed bool `json:"TerminateOnLastHandleClosed,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` + + GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go new file mode 100644 index 00000000..22b92ffd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/property_query.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// By default the basic properties will be returned. This query provides a way to request specific properties. 
+type PropertyQuery struct { + + PropertyTypes []string `json:"PropertyTypes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go new file mode 100644 index 00000000..97e45312 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/rdp_connection_options.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RdpConnectionOptions struct { + + AccessSids []string `json:"AccessSids,omitempty"` + + NamedPipe string `json:"NamedPipe,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go new file mode 100644 index 00000000..fa574ccc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_changes.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryChanges struct { + + AddValues []RegistryValue `json:"AddValues,omitempty"` + + DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go new file mode 100644 index 00000000..fab03bc6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_key.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryKey struct { + + Hive string `json:"Hive,omitempty"` + + Name string `json:"Name,omitempty"` + + Volatile bool `json:"Volatile,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go new file mode 100644 index 00000000..1589f484 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/registry_value.go @@ -0,0 +1,31 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryValue struct { + + Key *RegistryKey `json:"Key,omitempty"` + + Name string `json:"Name,omitempty"` + + Type_ string `json:"Type,omitempty"` + + // One and only one value type must be set. 
+ StringValue string `json:"StringValue,omitempty"` + + BinaryValue string `json:"BinaryValue,omitempty"` + + DWordValue int32 `json:"DWordValue,omitempty"` + + QWordValue int32 `json:"QWordValue,omitempty"` + + // Only used if RegistryValueType is CustomType The data is in BinaryValue + CustomType int32 `json:"CustomType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go new file mode 100644 index 00000000..778ff587 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/restore_state.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RestoreState struct { + + // The path to the save state file to restore the system from. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` + + // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. + TemplateSystemId string `json:"TemplateSystemId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go new file mode 100644 index 00000000..e55fa1d9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/save_options.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SaveOptions struct { + + // The type of save operation to be performed. + SaveType string `json:"SaveType,omitempty"` + + // The path to the file that will container the saved state. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go new file mode 100644 index 00000000..bf253a47 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/scsi.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Scsi struct { + + // Map of attachments, where the key is the integer LUN number on the controller. 
+ Attachments map[string]Attachment `json:"Attachments,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go new file mode 100644 index 00000000..bd573f6c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_configuration.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryConfiguration struct { + + Regions []SharedMemoryRegion `json:"Regions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go new file mode 100644 index 00000000..a57b2cba --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegion struct { + + SectionName string `json:"SectionName,omitempty"` + + StartOffset int32 `json:"StartOffset,omitempty"` + + Length int32 `json:"Length,omitempty"` + + AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` + + HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go new file mode 100644 index 00000000..d9a50cc7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/shared_memory_region_info.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegionInfo struct { + + SectionName string `json:"SectionName,omitempty"` + + GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go new file mode 100644 index 00000000..599c06e8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/silo_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Silo job information +type SiloProperties struct { + + Enabled bool `json:"Enabled,omitempty"` + + JobName string `json:"JobName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go new file mode 100644 index 00000000..5cb3ed93 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/statistics.go @@ -0,0 +1,30 @@ +/* + * HCS API + * + * No description provided (generated 
by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Runtime statistics for a container +type Statistics struct { + + Timestamp time.Time `json:"Timestamp,omitempty"` + + ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` + + Uptime100ns int32 `json:"Uptime100ns,omitempty"` + + Processor *ProcessorStats `json:"Processor,omitempty"` + + Memory *MemoryStats `json:"Memory,omitempty"` + + Storage *StorageStats `json:"Storage,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go new file mode 100644 index 00000000..2627af91 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Storage struct { + + // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. + Layers []Layer `json:"Layers,omitempty"` + + // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. + Path string `json:"Path,omitempty"` + + QoS *StorageQoS `json:"QoS,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go new file mode 100644 index 00000000..8c5255df --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_qo_s.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type StorageQoS struct { + + IopsMaximum int32 `json:"IopsMaximum,omitempty"` + + BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go new file mode 100644 index 00000000..198ea57d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/storage_stats.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Storage runtime statistics +type StorageStats struct { + + ReadCountNormalized int32 `json:"ReadCountNormalized,omitempty"` + + ReadSizeBytes int32 `json:"ReadSizeBytes,omitempty"` + + WriteCountNormalized int32 `json:"WriteCountNormalized,omitempty"` + + WriteSizeBytes int32 `json:"WriteSizeBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go new 
file mode 100644 index 00000000..af2e3c82 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/topology.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Topology struct { + + Memory *Memory2 `json:"Memory,omitempty"` + + Processor *Processor2 `json:"Processor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go new file mode 100644 index 00000000..ba91178f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Uefi struct { + + EnableDebugger bool `json:"EnableDebugger,omitempty"` + + SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` + + BootThis *UefiBootEntry `json:"BootThis,omitempty"` + + Console string `json:"Console,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go new file mode 100644 index 00000000..6620fb2b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/uefi_boot_entry.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type UefiBootEntry struct { + + DeviceType string `json:"DeviceType,omitempty"` + + DevicePath string `json:"DevicePath,omitempty"` + + DiskNumber int32 `json:"DiskNumber,omitempty"` + + OptionalData string `json:"OptionalData,omitempty"` + + VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go new file mode 100644 index 00000000..62c0e4d1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/version.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Version struct { + + Major int32 `json:"Major,omitempty"` + + Minor int32 `json:"Minor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go new file mode 100644 index 00000000..0958e560 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/video_monitor.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VideoMonitor struct { + + HorizontalResolution int32 
`json:"HorizontalResolution,omitempty"` + + VerticalResolution int32 `json:"VerticalResolution,omitempty"` + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go new file mode 100644 index 00000000..2d22b1bc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_machine.go @@ -0,0 +1,32 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachine struct { + + // StopOnReset is private in the schema. If regenerated need to put back. + StopOnReset bool `json:"StopOnReset,omitempty"` + + Chipset *Chipset `json:"Chipset,omitempty"` + + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + + Devices *Devices `json:"Devices,omitempty"` + + GuestState *GuestState `json:"GuestState,omitempty"` + + RestoreState *RestoreState `json:"RestoreState,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + + GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go new file mode 100644 index 00000000..48402d8e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_node_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualNodeInfo struct { + + VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` + + PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` + + VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` + + MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go new file mode 100644 index 00000000..f5b7f3e3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_controller.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemController struct { + Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` + + MaximumCount uint32 `json:"MaximumCount,omitempty"` + + MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` + + Backing string `json:"Backing,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go new file mode 100644 index 00000000..47714444 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_p_mem_device.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description 
provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemDevice struct { + + HostPath string `json:"HostPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go new file mode 100644 index 00000000..76131b3a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmb struct { + + Shares []VirtualSmbShare `json:"Shares,omitempty"` + + DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go new file mode 100644 index 00000000..b50098a4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShare struct { + + Name string `json:"Name,omitempty"` + + Path string `json:"Path,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` + + Options *VirtualSmbShareOptions `json:"Options,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go new file mode 100644 index 00000000..c1894279 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/virtual_smb_share_options.go @@ -0,0 +1,63 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShareOptions struct { + + ReadOnly bool `json:"ReadOnly,omitempty"` + + // convert exclusive access to shared read access + ShareRead bool `json:"ShareRead,omitempty"` + + // all opens will use cached I/O + CacheIo bool `json:"CacheIo,omitempty"` + + // disable oplock support + NoOplocks bool `json:"NoOplocks,omitempty"` + + // Acquire the backup privilege when attempting to open + TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` + + // Use the identity of the share root when opening + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + // disable Direct Mapping + NoDirectmap bool `json:"NoDirectmap,omitempty"` + + // disable Byterange locks + NoLocks bool `json:"NoLocks,omitempty"` + + // disable Directory CHange Notifications + NoDirnotify bool `json:"NoDirnotify,omitempty"` + + // share is use for VM shared memory + VmSharedMemory bool `json:"VmSharedMemory,omitempty"` + + // allow access only to the files specified in 
AllowedFiles + RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` + + // disable all oplocks except Level II + ForceLevelIIOplocks bool `json:"ForceLevelIIOplocks,omitempty"` + + // Allow the host to reparse this base layer + ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` + + // Enable pseudo-oplocks + PseudoOplocks bool `json:"PseudoOplocks,omitempty"` + + // All opens will use non-cached IO + NonCacheIo bool `json:"NonCacheIo,omitempty"` + + // Enable pseudo directory change notifications + PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` + + // Block directory enumeration, renames, and deletes. + SingleFileMapping bool `json:"SingleFileMapping,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go new file mode 100644 index 00000000..39f62866 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/vm_memory.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VmMemory struct { + + AvailableMemory int32 `json:"AvailableMemory,omitempty"` + + AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` + + ReservedMemory int32 `json:"ReservedMemory,omitempty"` + + AssignedMemory int32 `json:"AssignedMemory,omitempty"` + + SlpActive bool `json:"SlpActive,omitempty"` + + BalancingEnabled bool `json:"BalancingEnabled,omitempty"` + + DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go new file mode 100644 index 00000000..cf632bbc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schema2/windows_crash_reporting.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type WindowsCrashReporting struct { + + DumpFileName string `json:"DumpFileName,omitempty"` + + MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go b/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go new file mode 100644 index 00000000..8bade81a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go @@ -0,0 +1,81 @@ +// +build windows + +package schemaversion + +import ( + "encoding/json" + "fmt" + + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" +) + +// SchemaV10 makes it easy for callers to get a v1.0 schema version object +func SchemaV10() *hcsschema.Version { + return &hcsschema.Version{Major: 1, Minor: 0} +} + +// SchemaV21 makes it easy for callers to get a v2.1 schema version object +func SchemaV21() *hcsschema.Version { + return &hcsschema.Version{Major: 2, Minor: 1} +} + +// isSupported determines if a given schema version is supported +func IsSupported(sv *hcsschema.Version) error { + if IsV10(sv) { + return nil + } + if IsV21(sv) { + if osversion.Get().Build < 
osversion.RS5 { + return fmt.Errorf("unsupported on this Windows build") + } + return nil + } + return fmt.Errorf("unknown schema version %s", String(sv)) +} + +// IsV10 determines if a given schema version object is 1.0. This was the only thing +// supported in RS1..3. It lives on in RS5, but will be deprecated in a future release. +func IsV10(sv *hcsschema.Version) bool { + if sv.Major == 1 && sv.Minor == 0 { + return true + } + return false +} + +// IsV21 determines if a given schema version object is 2.0. This was introduced in +// RS4, but not fully implemented. Recommended for applications using HCS in RS5 +// onwards. +func IsV21(sv *hcsschema.Version) bool { + if sv.Major == 2 && sv.Minor == 1 { + return true + } + return false +} + +// String returns a JSON encoding of a schema version object +func String(sv *hcsschema.Version) string { + b, err := json.Marshal(sv) + if err != nil { + return "" + } + return string(b[:]) +} + +// DetermineSchemaVersion works out what schema version to use based on build and +// requested option. +func DetermineSchemaVersion(requestedSV *hcsschema.Version) *hcsschema.Version { + sv := SchemaV10() + if osversion.Get().Build >= osversion.RS5 { + sv = SchemaV21() + } + if requestedSV != nil { + if err := IsSupported(requestedSV); err == nil { + sv = requestedSV + } else { + logrus.Warnf("Ignoring unsupported requested schema version %+v", requestedSV) + } + } + return sv +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion_test.go b/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion_test.go new file mode 100644 index 00000000..3f0f027a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion_test.go @@ -0,0 +1,63 @@ +package schemaversion + +import ( + "io/ioutil" + "testing" + + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/osversion" + _ "github.com/Microsoft/hcsshim/test/functional/manifest" + "github.com/sirupsen/logrus" +) + +func init() { + logrus.SetOutput(ioutil.Discard) +} + +func TestDetermineSchemaVersion(t *testing.T) { + osv := osversion.Get() + + if osv.Build >= osversion.RS5 { + if sv := DetermineSchemaVersion(nil); !IsV21(sv) { + t.Fatalf("expected v2") + } + if sv := DetermineSchemaVersion(SchemaV21()); !IsV21(sv) { + t.Fatalf("expected requested v2") + } + if sv := DetermineSchemaVersion(SchemaV10()); !IsV10(sv) { + t.Fatalf("expected requested v1") + } + if sv := DetermineSchemaVersion(&hcsschema.Version{}); !IsV21(sv) { + t.Fatalf("expected requested v2") + } + + if err := IsSupported(SchemaV21()); err != nil { + t.Fatalf("v2 expected to be supported") + } + if err := IsSupported(SchemaV10()); err != nil { + t.Fatalf("v1 expected to be supported") + } + + } else { + if sv := DetermineSchemaVersion(nil); !IsV10(sv) { + t.Fatalf("expected v1") + } + // Pre RS5 will downgrade to v1 even if request v2 + if sv := DetermineSchemaVersion(SchemaV21()); !IsV10(sv) { + t.Fatalf("expected requested v1") + } + if sv := DetermineSchemaVersion(SchemaV10()); !IsV10(sv) { + t.Fatalf("expected requested v1") + } + if sv := DetermineSchemaVersion(&hcsschema.Version{}); !IsV10(sv) { + t.Fatalf("expected requested v1") + } + + if err := IsSupported(SchemaV21()); err == nil { + t.Fatalf("didn't expect v2 to be supported") + } + if err := IsSupported(SchemaV10()); err != nil { + t.Fatalf("v1 expected to be supported") + } + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go 
b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go new file mode 100644 index 00000000..ff3b6572 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go @@ -0,0 +1,70 @@ +package timeout + +import ( + "os" + "strconv" + "time" +) + +var ( + // defaultTimeout is the timeout for most operations that is not overridden. + defaultTimeout = 4 * time.Minute + + // defaultTimeoutTestdRetry is the retry loop timeout for testd to respond + // for a disk to come online in LCOW. + defaultTimeoutTestdRetry = 5 * time.Second +) + +// External variables for HCSShim consumers to use. +var ( + // SystemCreate is the timeout for creating a compute system + SystemCreate time.Duration = defaultTimeout + + // SystemStart is the timeout for starting a compute system + SystemStart time.Duration = defaultTimeout + + // SystemPause is the timeout for pausing a compute system + SystemPause time.Duration = defaultTimeout + + // SystemResume is the timeout for resuming a compute system + SystemResume time.Duration = defaultTimeout + + // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. + SyscallWatcher time.Duration = defaultTimeout + + // Tar2VHD is the timeout for the tar2vhd operation to complete + Tar2VHD time.Duration = defaultTimeout + + // ExternalCommandToStart is the timeout for external commands to start + ExternalCommandToStart = defaultTimeout + + // ExternalCommandToComplete is the timeout for external commands to complete. + // Generally this means copying data from their stdio pipes. + ExternalCommandToComplete = defaultTimeout + + // TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW + TestDRetryLoop = defaultTimeoutTestdRetry +) + +func init() { + SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate) + SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) + SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) + SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) + SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) + Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) + ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) + ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete) + TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop) +} + +func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration { + envTimeout := os.Getenv(env) + if len(envTimeout) > 0 { + e, err := strconv.Atoi(envTimeout) + if err == nil && e > 0 { + return time.Second * time.Duration(e) + } + } + return defaultValue +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go new file mode 100644 index 00000000..cf0e668a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go @@ -0,0 +1,19 @@ +package uvm + +import "fmt" + +const ( + // MaxVPMEMCount is the maximum number of VPMem devices that may be added to an LCOW + // utility VM + MaxVPMEMCount = 128 + + // DefaultVPMEMCount is the default number of VPMem devices that may be added to an LCOW + // utility VM if the create request doesn't specify how many. 
+ DefaultVPMEMCount = 64 + + // DefaultVPMemSizeBytes is the default size of a VPMem device if the create request + // doesn't specify. + DefaultVPMemSizeBytes = 4 * 1024 * 1024 * 1024 // 4GB +) + +var errNotSupported = fmt.Errorf("not supported") diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go new file mode 100644 index 00000000..4339ef8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go @@ -0,0 +1,11 @@ +package uvm + +import ( + "sync/atomic" +) + +// ContainerCounter is used for where we layout things for a container in +// a utility VM. For WCOW it'll be C:\c\N\. For LCOW it'll be /run/gcs/c/N/. +func (uvm *UtilityVM) ContainerCounter() uint64 { + return atomic.AddUint64(&uvm.containerCounter, 1) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go new file mode 100644 index 00000000..0daaef9b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go @@ -0,0 +1,62 @@ +package uvm + +import ( + "runtime" +) + +// Options are the set of options passed to Create() to create a utility vm. +type Options struct { + ID string // Identifier for the uvm. Defaults to generated GUID. + Owner string // Specifies the owner. Defaults to executable name. + AdditionHCSDocumentJSON string // Optional additional JSON to merge into the HCS document prior + + // MemorySizeInMB sets the UVM memory. If `0` will default to platform + // default. + MemorySizeInMB int32 + + // Memory for UVM. Defaults to true. For physical backed memory, set to + // false. + AllowOvercommit bool + + // Memory for UVM. Defaults to false. For virtual memory with deferred + // commit, set to true. + EnableDeferredCommit bool + + // ProcessorCount sets the number of vCPU's. If `0` will default to platform + // default. + ProcessorCount int32 +} + +// ID returns the ID of the VM's compute system. +func (uvm *UtilityVM) ID() string { + return uvm.hcsSystem.ID() +} + +// OS returns the operating system of the utility VM. +func (uvm *UtilityVM) OS() string { + return uvm.operatingSystem +} + +// Close terminates and releases resources associated with the utility VM. +func (uvm *UtilityVM) Close() error { + uvm.Terminate() + + // outputListener will only be nil for a Create -> Stop without a Start. In + // this case we have no goroutine processing output so its safe to close the + // channel here. 
+ if uvm.outputListener != nil { + close(uvm.outputProcessingDone) + uvm.outputListener.Close() + uvm.outputListener = nil + } + err := uvm.hcsSystem.Close() + uvm.hcsSystem = nil + return err +} + +func defaultProcessorCount() int32 { + if runtime.NumCPU() == 1 { + return 1 + } + return 2 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go new file mode 100644 index 00000000..b9c261e8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go @@ -0,0 +1,378 @@ +package uvm + +import ( + "encoding/binary" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/mergemaps" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/schemaversion" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/Microsoft/hcsshim/osversion" + "github.com/linuxkit/virtsock/pkg/hvsock" + "github.com/sirupsen/logrus" +) + +type PreferredRootFSType int + +const ( + PreferredRootFSTypeInitRd PreferredRootFSType = iota + PreferredRootFSTypeVHD +) + +// OutputHandler is used to process the output from the program run in the UVM. +type OutputHandler func(io.Reader) + +const ( + // InitrdFile is the default file name for an initrd.img used to boot LCOW. + InitrdFile = "initrd.img" + // VhdFile is the default file name for a rootfs.vhd used to boot LCOW. + VhdFile = "rootfs.vhd" + // KernelFile is the default file name for a kernel used to boot LCOW. + KernelFile = "kernel" + // UncompressedKernelFile is the default file name for an uncompressed + // kernel used to boot LCOW with KernelDirect. + UncompressedKernelFile = "vmlinux" +) + +// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm. +type OptionsLCOW struct { + *Options + + BootFilesPath string // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers + KernelFile string // Filename under `BootFilesPath` for the kernel. Defaults to `kernel` + KernelDirect bool // Skip UEFI and boot directly to `kernel` + RootFSFile string // Filename under `BootFilesPath` for the UVMs root file system. Defaults to `InitrdFile` + KernelBootOptions string // Additional boot options for the kernel + EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM + ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe + SCSIControllerCount uint32 // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1. + UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true + ExecCommandLine string // The command line to exec from init. Defaults to GCS + ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false + ForwardStderr bool // Whether stderr will be forwarded from the executed program. Defaults to true + OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages + VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken. + VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`. 
+ PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD` +} + +// NewDefaultOptionsLCOW creates the default options for a bootable version of +// LCOW. +// +// `id` the ID of the compute system. If not passed will generate a new GUID. +// +// `owner` the owner of the compute system. If not passed will use the +// executable files name. +func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW { + // Use KernelDirect boot by default on all builds that support it. + kernelDirectSupported := osversion.Get().Build >= 18286 + opts := &OptionsLCOW{ + Options: &Options{ + ID: id, + Owner: owner, + MemorySizeInMB: 1024, + AllowOvercommit: true, + EnableDeferredCommit: false, + ProcessorCount: defaultProcessorCount(), + }, + BootFilesPath: filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers"), + KernelFile: KernelFile, + KernelDirect: kernelDirectSupported, + RootFSFile: InitrdFile, + KernelBootOptions: "", + EnableGraphicsConsole: false, + ConsolePipe: "", + SCSIControllerCount: 1, + UseGuestConnection: true, + ExecCommandLine: fmt.Sprintf("/bin/gcs -log-format json -loglevel %s", logrus.StandardLogger().Level.String()), + ForwardStdout: false, + ForwardStderr: true, + OutputHandler: parseLogrus, + VPMemDeviceCount: DefaultVPMEMCount, + VPMemSizeBytes: DefaultVPMemSizeBytes, + PreferredRootFSType: PreferredRootFSTypeInitRd, + } + + if opts.ID == "" { + opts.ID = guid.New().String() + } + if opts.Owner == "" { + opts.Owner = filepath.Base(os.Args[0]) + } + + if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil { + // We have a rootfs.vhd in the boot files path. Use it over an initrd.img + opts.RootFSFile = VhdFile + opts.PreferredRootFSType = PreferredRootFSTypeVHD + } + + if kernelDirectSupported { + // KernelDirect supports uncompressed kernel if the kernel is present. + // Default to uncompressed if on box. NOTE: If `kernel` is already + // uncompressed and simply named 'kernel' it will still be used + // uncompressed automatically. + if _, err := os.Stat(filepath.Join(opts.BootFilesPath, UncompressedKernelFile)); err == nil { + opts.KernelFile = UncompressedKernelFile + } + } + return opts +} + +const linuxLogVsockPort = 109 + +// CreateLCOW creates an HCS compute system representing a utility VM. +func CreateLCOW(opts *OptionsLCOW) (_ *UtilityVM, err error) { + logrus.Debugf("uvm::CreateLCOW %+v", opts) + + // We dont serialize OutputHandler so if it is missing we need to put it back to the default. 
+ if opts.OutputHandler == nil { + opts.OutputHandler = parseLogrus + } + + uvm := &UtilityVM{ + id: opts.ID, + owner: opts.Owner, + operatingSystem: "linux", + scsiControllerCount: opts.SCSIControllerCount, + vpmemMaxCount: opts.VPMemDeviceCount, + vpmemMaxSizeBytes: opts.VPMemSizeBytes, + } + + kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile) + if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) { + return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath) + } + rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile) + if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) { + return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath) + } + + if opts.SCSIControllerCount > 1 { + return nil, fmt.Errorf("SCSI controller count must be 0 or 1") // Future extension here for up to 4 + } + if opts.VPMemDeviceCount > MaxVPMEMCount { + return nil, fmt.Errorf("vpmem device count cannot be greater than %d", MaxVPMEMCount) + } + if uvm.vpmemMaxCount > 0 { + if opts.VPMemSizeBytes%4096 != 0 { + return nil, fmt.Errorf("opts.VPMemSizeBytes must be a multiple of 4096") + } + } else { + if opts.PreferredRootFSType == PreferredRootFSTypeVHD { + return nil, fmt.Errorf("PreferredRootFSTypeVHD requires at least one VPMem device") + } + } + if opts.KernelDirect && osversion.Get().Build < 18286 { + return nil, fmt.Errorf("KernelDirectBoot is not support on builds older than 18286") + } + + doc := &hcsschema.ComputeSystem{ + Owner: uvm.owner, + SchemaVersion: schemaversion.SchemaV21(), + ShouldTerminateOnLastHandleClosed: true, + VirtualMachine: &hcsschema.VirtualMachine{ + StopOnReset: true, + Chipset: &hcsschema.Chipset{}, + ComputeTopology: &hcsschema.Topology{ + Memory: &hcsschema.Memory2{ + SizeInMB: opts.MemorySizeInMB, + AllowOvercommit: opts.AllowOvercommit, + EnableDeferredCommit: opts.EnableDeferredCommit, + }, + Processor: &hcsschema.Processor2{ + Count: opts.ProcessorCount, + }, + }, + Devices: &hcsschema.Devices{ + HvSocket: &hcsschema.HvSocket2{ + HvSocketConfig: &hcsschema.HvSocketSystemConfig{ + // Allow administrators and SYSTEM to bind to vsock sockets + // so that we can create a GCS log socket. + DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", + }, + }, + }, + }, + } + + if opts.UseGuestConnection { + doc.VirtualMachine.GuestConnection = &hcsschema.GuestConnection{ + UseVsock: true, + UseConnectedSuspend: true, + } + } + + if uvm.scsiControllerCount > 0 { + // TODO: JTERRY75 - this should enumerate scsicount and add an entry per value. + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{ + "0": { + Attachments: make(map[string]hcsschema.Attachment), + }, + } + } + if uvm.vpmemMaxCount > 0 { + doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{ + MaximumCount: uvm.vpmemMaxCount, + MaximumSizeBytes: uvm.vpmemMaxSizeBytes, + } + } + + var kernelArgs string + switch opts.PreferredRootFSType { + case PreferredRootFSTypeInitRd: + if !opts.KernelDirect { + kernelArgs = "initrd=/" + opts.RootFSFile + } + case PreferredRootFSTypeVHD: + // Support for VPMem VHD(X) booting rather than initrd.. 
+ kernelArgs = "root=/dev/pmem0 ro init=/init" + imageFormat := "Vhd1" + if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" { + imageFormat = "Vhdx" + } + doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{ + "0": { + HostPath: rootfsFullPath, + ReadOnly: true, + ImageFormat: imageFormat, + }, + } + if err := wclayer.GrantVmAccess(uvm.id, rootfsFullPath); err != nil { + return nil, fmt.Errorf("failed to grantvmaccess to %s: %s", rootfsFullPath, err) + } + // Add to our internal structure + uvm.vpmemDevices[0] = vpmemInfo{ + hostPath: opts.RootFSFile, + uvmPath: "/", + refCount: 1, + } + } + + vmDebugging := false + if opts.ConsolePipe != "" { + vmDebugging = true + kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200" + doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{ + "0": { // Which is actually COM1 + NamedPipe: opts.ConsolePipe, + }, + } + } else { + kernelArgs += " 8250_core.nr_uarts=0" + } + + if opts.EnableGraphicsConsole { + vmDebugging = true + kernelArgs += " console=tty" + doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{} + doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{} + doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{} + } + + if !vmDebugging { + // Terminate the VM if there is a kernel panic. + kernelArgs += " panic=-1 quiet" + } + + if opts.KernelBootOptions != "" { + kernelArgs += " " + opts.KernelBootOptions + } + + // With default options, run GCS with stderr pointing to the vsock port + // created below in order to forward guest logs to logrus. + initArgs := "/bin/vsockexec" + + if opts.ForwardStdout { + initArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort) + } + + if opts.ForwardStderr { + initArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort) + } + + initArgs += " " + opts.ExecCommandLine + + if vmDebugging { + // Launch a shell on the console. + initArgs = `sh -c "` + initArgs + ` & exec sh"` + } + + kernelArgs += ` pci=off brd.rd_nr=0 pmtmr=0 -- ` + initArgs + + if !opts.KernelDirect { + doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{ + BootThis: &hcsschema.UefiBootEntry{ + DevicePath: `\` + opts.KernelFile, + DeviceType: "VmbFs", + VmbFsRootPath: opts.BootFilesPath, + OptionalData: kernelArgs, + }, + } + } else { + doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{ + KernelFilePath: kernelFullPath, + KernelCmdLine: kernelArgs, + } + if opts.PreferredRootFSType == PreferredRootFSTypeInitRd { + doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath + } + } + + fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON)) + if err != nil { + return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err) + } + + hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc) + if err != nil { + logrus.Debugln("failed to create UVM: ", err) + return nil, err + } + + uvm.hcsSystem = hcsSystem + defer func() { + if err != nil { + uvm.Close() + } + }() + + // Create a socket that the executed program can send to. This is usually + // used by GCS to send log data. 
+ if opts.ForwardStdout || opts.ForwardStderr { + uvm.outputHandler = opts.OutputHandler + uvm.outputProcessingDone = make(chan struct{}) + uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort) + if err != nil { + return nil, err + } + } + + return uvm, nil +} + +func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) { + properties, err := uvm.hcsSystem.Properties() + if err != nil { + return nil, err + } + vmID, err := hvsock.GUIDFromString(properties.RuntimeID) + if err != nil { + return nil, err + } + serviceID, _ := hvsock.GUIDFromString("00000000-facb-11e6-bd58-64006a7986d3") + binary.LittleEndian.PutUint32(serviceID[0:4], port) + return hvsock.Listen(hvsock.Addr{VMID: vmID, ServiceID: serviceID}) +} + +// PMemMaxSizeBytes returns the maximum size of a PMEM layer (LCOW) +func (uvm *UtilityVM) PMemMaxSizeBytes() uint64 { + return uvm.vpmemMaxSizeBytes +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_test.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_test.go new file mode 100644 index 00000000..9e494dd0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_test.go @@ -0,0 +1,25 @@ +package uvm + +import ( + "testing" +) + +// Unit tests for negative testing of input to uvm.Create() + +func TestCreateBadBootFilesPath(t *testing.T) { + opts := NewDefaultOptionsLCOW(t.Name(), "") + opts.BootFilesPath = `c:\does\not\exist\I\hope` + + _, err := CreateLCOW(opts) + if err == nil || err.Error() != `kernel: 'c:\does\not\exist\I\hope\kernel' not found` { + t.Fatal(err) + } +} + +func TestCreateWCOWBadLayerFolders(t *testing.T) { + opts := NewDefaultOptionsWCOW(t.Name(), "") + _, err := CreateWCOW(opts) + if err == nil || (err != nil && err.Error() != `at least 2 LayerFolders must be supplied`) { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go new file mode 100644 index 00000000..9d5e61c9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go @@ -0,0 +1,186 @@ +package uvm + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/mergemaps" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/schemaversion" + "github.com/Microsoft/hcsshim/internal/uvmfolder" + "github.com/Microsoft/hcsshim/internal/wcow" + "github.com/sirupsen/logrus" +) + +// OptionsWCOW are the set of options passed to CreateWCOW() to create a utility vm. +type OptionsWCOW struct { + *Options + + LayerFolders []string // Set of folders for base layers and scratch. Ordered from top most read-only through base read-only layer, followed by scratch +} + +// NewDefaultOptionsWCOW creates the default options for a bootable version of +// WCOW. The caller `MUST` set the `LayerFolders` path on the returned value. +// +// `id` the ID of the compute system. If not passed will generate a new GUID. +// +// `owner` the owner of the compute system. If not passed will use the +// executable files name. 
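Taken together, NewDefaultOptionsLCOW, CreateLCOW, Start, and Close above suggest the following lifecycle for an LCOW utility VM. A minimal sketch, assuming the code lives inside the hcsshim module (the uvm package is internal) and that the LCOW boot files are installed under the default Linux Containers path:

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/uvm"
)

func main() {
	// Defaults: 1024 MB overcommitted memory, one SCSI controller, GCS
	// connection enabled, rootfs.vhd preferred over initrd.img when present.
	opts := uvm.NewDefaultOptionsLCOW("example-lcow-uvm", "")

	vm, err := uvm.CreateLCOW(opts)
	if err != nil {
		log.Fatalf("create: %v", err)
	}
	// Close terminates the VM and tears down the log listener if Start
	// was never called.
	defer vm.Close()

	if err := vm.Start(); err != nil {
		log.Fatalf("start: %v", err)
	}
	log.Printf("utility VM %s (%s) is running", vm.ID(), vm.OS())
}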
+func NewDefaultOptionsWCOW(id, owner string) *OptionsWCOW { + opts := &OptionsWCOW{ + Options: &Options{ + ID: id, + Owner: owner, + MemorySizeInMB: 1024, + AllowOvercommit: true, + EnableDeferredCommit: false, + ProcessorCount: defaultProcessorCount(), + }, + } + + if opts.ID == "" { + opts.ID = guid.New().String() + } + if opts.Owner == "" { + opts.Owner = filepath.Base(os.Args[0]) + } + + return opts +} + +// CreateWCOW creates an HCS compute system representing a utility VM. +// +// WCOW Notes: +// - The scratch is always attached to SCSI 0:0 +// +func CreateWCOW(opts *OptionsWCOW) (_ *UtilityVM, err error) { + logrus.Debugf("uvm::CreateWCOW %+v", opts) + + if opts.Options == nil { + opts.Options = &Options{} + } + + uvm := &UtilityVM{ + id: opts.ID, + owner: opts.Owner, + operatingSystem: "windows", + scsiControllerCount: 1, + vsmbShares: make(map[string]*vsmbShare), + } + + if len(opts.LayerFolders) < 2 { + return nil, fmt.Errorf("at least 2 LayerFolders must be supplied") + } + uvmFolder, err := uvmfolder.LocateUVMFolder(opts.LayerFolders) + if err != nil { + return nil, fmt.Errorf("failed to locate utility VM folder from layer folders: %s", err) + } + + // TODO: BUGBUG Remove this. @jhowardmsft + // It should be the responsiblity of the caller to do the creation and population. + // - Update runhcs too (vm.go). + // - Remove comment in function header + // - Update tests that rely on this current behaviour. + // Create the RW scratch in the top-most layer folder, creating the folder if it doesn't already exist. + scratchFolder := opts.LayerFolders[len(opts.LayerFolders)-1] + logrus.Debugf("uvm::CreateWCOW scratch folder: %s", scratchFolder) + + // Create the directory if it doesn't exist + if _, err := os.Stat(scratchFolder); os.IsNotExist(err) { + logrus.Debugf("uvm::CreateWCOW creating folder: %s ", scratchFolder) + if err := os.MkdirAll(scratchFolder, 0777); err != nil { + return nil, fmt.Errorf("failed to create utility VM scratch folder: %s", err) + } + } + + // Create sandbox.vhdx in the scratch folder based on the template, granting the correct permissions to it + scratchPath := filepath.Join(scratchFolder, "sandbox.vhdx") + if _, err := os.Stat(scratchPath); os.IsNotExist(err) { + if err := wcow.CreateUVMScratch(uvmFolder, scratchFolder, uvm.id); err != nil { + return nil, fmt.Errorf("failed to create scratch: %s", err) + } + } + + doc := &hcsschema.ComputeSystem{ + Owner: uvm.owner, + SchemaVersion: schemaversion.SchemaV21(), + ShouldTerminateOnLastHandleClosed: true, + VirtualMachine: &hcsschema.VirtualMachine{ + StopOnReset: true, + Chipset: &hcsschema.Chipset{ + Uefi: &hcsschema.Uefi{ + BootThis: &hcsschema.UefiBootEntry{ + DevicePath: `\EFI\Microsoft\Boot\bootmgfw.efi`, + DeviceType: "VmbFs", + }, + }, + }, + ComputeTopology: &hcsschema.Topology{ + Memory: &hcsschema.Memory2{ + SizeInMB: opts.MemorySizeInMB, + AllowOvercommit: opts.AllowOvercommit, + // EnableHotHint is not compatible with physical. 
+ EnableHotHint: opts.AllowOvercommit, + EnableDeferredCommit: opts.EnableDeferredCommit, + }, + Processor: &hcsschema.Processor2{ + Count: defaultProcessorCount(), + }, + }, + GuestConnection: &hcsschema.GuestConnection{}, + Devices: &hcsschema.Devices{ + Scsi: map[string]hcsschema.Scsi{ + "0": { + Attachments: map[string]hcsschema.Attachment{ + "0": { + Path: scratchPath, + Type_: "VirtualDisk", + }, + }, + }, + }, + HvSocket: &hcsschema.HvSocket2{ + HvSocketConfig: &hcsschema.HvSocketSystemConfig{ + // Allow administrators and SYSTEM to bind to vsock sockets + // so that we can create a GCS log socket. + DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)", + }, + }, + VirtualSmb: &hcsschema.VirtualSmb{ + DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere + Shares: []hcsschema.VirtualSmbShare{ + { + Name: "os", + Path: filepath.Join(uvmFolder, `UtilityVM\Files`), + Options: &hcsschema.VirtualSmbShareOptions{ + ReadOnly: true, + PseudoOplocks: true, + TakeBackupPrivilege: true, + CacheIo: true, + ShareRead: true, + }, + }, + }, + }, + }, + }, + } + + uvm.scsiLocations[0][0].hostPath = doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path + + fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON)) + if err != nil { + return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err) + } + + hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc) + if err != nil { + logrus.Debugln("failed to create UVM: ", err) + return nil, err + } + uvm.hcsSystem = hcsSystem + return uvm, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go new file mode 100644 index 00000000..5c6527a7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go @@ -0,0 +1,6 @@ +package uvm + +// Modifies the compute system by sending a request to HCS +func (uvm *UtilityVM) Modify(hcsModificationDocument interface{}) error { + return uvm.hcsSystem.Modify(hcsModificationDocument) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go new file mode 100644 index 00000000..3e54e482 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go @@ -0,0 +1,251 @@ +package uvm + +import ( + "fmt" + "path" + + "github.com/Microsoft/hcsshim/hcn" + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/hns" + "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" +) + +// AddNetNS adds network namespace inside the guest & adds endpoints to the guest on that namepace +func (uvm *UtilityVM) AddNetNS(id string, endpoints []*hns.HNSEndpoint) (err error) { + uvm.m.Lock() + defer uvm.m.Unlock() + ns := uvm.namespaces[id] + if ns == nil { + ns = &namespaceInfo{} + + if uvm.isNetworkNamespaceSupported() { + // Add a Guest Network namespace. On LCOW we add the adapters + // dynamically. 
+ if uvm.operatingSystem == "windows" { + hcnNamespace, err := hcn.GetNamespaceByID(id) + if err != nil { + return err + } + guestNamespace := hcsschema.ModifySettingRequest{ + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetworkNamespace, + RequestType: requesttype.Add, + Settings: hcnNamespace, + }, + } + if err := uvm.Modify(&guestNamespace); err != nil { + return err + } + } + } + + defer func() { + if err != nil { + if e := uvm.removeNamespaceNICs(ns); e != nil { + logrus.Warnf("failed to undo NIC add: %v", e) + } + } + }() + for _, endpoint := range endpoints { + nicID := guid.New() + err = uvm.addNIC(nicID, endpoint) + if err != nil { + return err + } + ns.nics = append(ns.nics, nicInfo{nicID, endpoint}) + } + if uvm.namespaces == nil { + uvm.namespaces = make(map[string]*namespaceInfo) + } + uvm.namespaces[id] = ns + } + ns.refCount++ + return nil +} + +//RemoveNetNS removes the namespace information +func (uvm *UtilityVM) RemoveNetNS(id string) error { + uvm.m.Lock() + defer uvm.m.Unlock() + ns := uvm.namespaces[id] + if ns == nil || ns.refCount <= 0 { + panic(fmt.Errorf("removed a namespace that was not added: %s", id)) + } + + ns.refCount-- + + // Remove the Guest Network namespace + if uvm.isNetworkNamespaceSupported() { + if uvm.operatingSystem == "windows" { + hcnNamespace, err := hcn.GetNamespaceByID(id) + if err != nil { + return err + } + guestNamespace := hcsschema.ModifySettingRequest{ + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetworkNamespace, + RequestType: requesttype.Remove, + Settings: hcnNamespace, + }, + } + if err := uvm.Modify(&guestNamespace); err != nil { + return err + } + } + } + + var err error + if ns.refCount == 0 { + err = uvm.removeNamespaceNICs(ns) + delete(uvm.namespaces, id) + } + + return err +} + +// IsNetworkNamespaceSupported returns bool value specifying if network namespace is supported inside the guest +func (uvm *UtilityVM) isNetworkNamespaceSupported() bool { + p, err := uvm.ComputeSystem().Properties(schema1.PropertyTypeGuestConnection) + if err == nil { + return p.GuestConnectionInfo.GuestDefinedCapabilities.NamespaceAddRequestSupported + } + + return false +} + +func (uvm *UtilityVM) removeNamespaceNICs(ns *namespaceInfo) error { + for len(ns.nics) != 0 { + nic := ns.nics[len(ns.nics)-1] + err := uvm.removeNIC(nic.ID, nic.Endpoint) + if err != nil { + return err + } + ns.nics = ns.nics[:len(ns.nics)-1] + } + return nil +} + +func getNetworkModifyRequest(adapterID string, requestType string, settings interface{}) interface{} { + if osversion.Get().Build >= osversion.RS5 { + return guestrequest.NetworkModifyRequest{ + AdapterId: adapterID, + RequestType: requestType, + Settings: settings, + } + } + return guestrequest.RS4NetworkModifyRequest{ + AdapterInstanceId: adapterID, + RequestType: requestType, + Settings: settings, + } +} + +func (uvm *UtilityVM) addNIC(id guid.GUID, endpoint *hns.HNSEndpoint) error { + + // First a pre-add. This is a guest-only request and is only done on Windows. 
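AddNetNS and RemoveNetNS above are ref-counted per namespace ID: NICs are hot-added only on the first AddNetNS for an ID and removed only once the count returns to zero, and RemoveNetNS panics for an ID that was never added, so the calls must be balanced. A minimal sketch of that pairing, assuming code inside the hcsshim module and HNS endpoints obtained elsewhere (the wrapper function names are illustrative):

package uvmexamples

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/hns"
	"github.com/Microsoft/hcsshim/internal/uvm"
)

// attachNetwork hot-adds the endpoints on the first call for nsID; later
// calls for the same ID only bump the ref count.
func attachNetwork(vm *uvm.UtilityVM, nsID string, endpoints []*hns.HNSEndpoint) error {
	return vm.AddNetNS(nsID, endpoints)
}

// detachNetwork must be balanced with a prior attachNetwork; the NICs are
// removed once the namespace ref count reaches zero.
func detachNetwork(vm *uvm.UtilityVM, nsID string) {
	if err := vm.RemoveNetNS(nsID); err != nil {
		log.Printf("remove netns %s: %v", nsID, err)
	}
}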
+ if uvm.operatingSystem == "windows" { + preAddRequest := hcsschema.ModifySettingRequest{ + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetwork, + RequestType: requesttype.Add, + Settings: getNetworkModifyRequest( + id.String(), + requesttype.PreAdd, + endpoint), + }, + } + if err := uvm.Modify(&preAddRequest); err != nil { + return err + } + } + + // Then the Add itself + request := hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + ResourcePath: path.Join("VirtualMachine/Devices/NetworkAdapters", id.String()), + Settings: hcsschema.NetworkAdapter{ + EndpointId: endpoint.Id, + MacAddress: endpoint.MacAddress, + }, + } + + if uvm.operatingSystem == "windows" { + request.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetwork, + RequestType: requesttype.Add, + Settings: getNetworkModifyRequest( + id.String(), + requesttype.Add, + nil), + } + } else { + // Verify this version of LCOW supports Network HotAdd + if uvm.isNetworkNamespaceSupported() { + request.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetwork, + RequestType: requesttype.Add, + Settings: &guestrequest.LCOWNetworkAdapter{ + NamespaceID: endpoint.Namespace.ID, + ID: id.String(), + MacAddress: endpoint.MacAddress, + IPAddress: endpoint.IPAddress.String(), + PrefixLength: endpoint.PrefixLength, + GatewayAddress: endpoint.GatewayAddress, + DNSSuffix: endpoint.DNSSuffix, + DNSServerList: endpoint.DNSServerList, + EnableLowMetric: endpoint.EnableLowMetric, + EncapOverhead: endpoint.EncapOverhead, + }, + } + } + } + + if err := uvm.Modify(&request); err != nil { + return err + } + + return nil +} + +func (uvm *UtilityVM) removeNIC(id guid.GUID, endpoint *hns.HNSEndpoint) error { + request := hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + ResourcePath: path.Join("VirtualMachine/Devices/NetworkAdapters", id.String()), + Settings: hcsschema.NetworkAdapter{ + EndpointId: endpoint.Id, + MacAddress: endpoint.MacAddress, + }, + } + + if uvm.operatingSystem == "windows" { + request.GuestRequest = hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + Settings: getNetworkModifyRequest( + id.String(), + requesttype.Remove, + nil), + } + } else { + // Verify this version of LCOW supports Network HotRemove + if uvm.isNetworkNamespaceSupported() { + request.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeNetwork, + RequestType: requesttype.Remove, + Settings: &guestrequest.LCOWNetworkAdapter{ + NamespaceID: endpoint.Namespace.ID, + ID: endpoint.Id, + }, + } + } + } + + if err := uvm.Modify(&request); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go new file mode 100644 index 00000000..7b64e2a4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go @@ -0,0 +1,136 @@ +package uvm + +import ( + "fmt" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/sirupsen/logrus" +) + +// AddPlan9 adds a Plan9 share to a utility VM. Each Plan9 share is ref-counted and +// only added if it isn't already. 
+func (uvm *UtilityVM) AddPlan9(hostPath string, uvmPath string, readOnly bool) error { + logrus.WithFields(logrus.Fields{ + logfields.UVMID: uvm.id, + "host-path": hostPath, + "uvm-path": uvmPath, + "readOnly": readOnly, + }).Debug("uvm::AddPlan9") + + if uvm.operatingSystem != "linux" { + return errNotSupported + } + if uvmPath == "" { + return fmt.Errorf("uvmPath must be passed to AddPlan9") + } + + // TODO: JTERRY75 - These are marked private in the schema. For now use them + // but when there are public variants we need to switch to them. + const ( + shareFlagsReadOnly int32 = 0x00000001 + shareFlagsLinuxMetadata int32 = 0x00000004 + shareFlagsCaseSensitive int32 = 0x00000008 + ) + + // TODO: JTERRY75 - `shareFlagsCaseSensitive` only works if the Windows + // `hostPath` supports case sensitivity. We need to detect this case before + // forwarding this flag in all cases. + flags := shareFlagsLinuxMetadata // | shareFlagsCaseSensitive + if readOnly { + flags |= shareFlagsReadOnly + } + + uvm.m.Lock() + defer uvm.m.Unlock() + if uvm.plan9Shares == nil { + uvm.plan9Shares = make(map[string]*plan9Info) + } + if _, ok := uvm.plan9Shares[hostPath]; !ok { + uvm.plan9Counter++ + + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + Settings: hcsschema.Plan9Share{ + Name: fmt.Sprintf("%d", uvm.plan9Counter), + Path: hostPath, + Port: int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999) + Flags: flags, + }, + ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Plan9/Shares"), + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedDirectory, + RequestType: requesttype.Add, + Settings: guestrequest.LCOWMappedDirectory{ + MountPath: uvmPath, + Port: int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999) + ReadOnly: readOnly, + }, + }, + } + + if err := uvm.Modify(modification); err != nil { + return err + } + uvm.plan9Shares[hostPath] = &plan9Info{ + refCount: 1, + uvmPath: uvmPath, + idCounter: uvm.plan9Counter, + port: int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999) + } + } else { + uvm.plan9Shares[hostPath].refCount++ + } + logrus.Debugf("hcsshim::AddPlan9 Success %s: refcount=%d %+v", hostPath, uvm.plan9Shares[hostPath].refCount, uvm.plan9Shares[hostPath]) + return nil +} + +// RemovePlan9 removes a Plan9 share from a utility VM. Each Plan9 share is ref-counted +// and only actually removed when the ref-count drops to zero. +func (uvm *UtilityVM) RemovePlan9(hostPath string) error { + if uvm.operatingSystem != "linux" { + return errNotSupported + } + logrus.Debugf("uvm::RemovePlan9 %s id:%s", hostPath, uvm.id) + uvm.m.Lock() + defer uvm.m.Unlock() + if _, ok := uvm.plan9Shares[hostPath]; !ok { + return fmt.Errorf("%s is not present as a Plan9 share in %s, cannot remove", hostPath, uvm.id) + } + return uvm.removePlan9(hostPath, uvm.plan9Shares[hostPath].uvmPath) +} + +// removePlan9 is the internally callable "unsafe" version of RemovePlan9. The mutex +// MUST be held when calling this function. +func (uvm *UtilityVM) removePlan9(hostPath, uvmPath string) error { + uvm.plan9Shares[hostPath].refCount-- + if uvm.plan9Shares[hostPath].refCount > 0 { + logrus.Debugf("uvm::RemovePlan9 Success %s id:%s Ref-count now %d. It is still present in the utility VM", hostPath, uvm.id, uvm.plan9Shares[hostPath].refCount) + return nil + } + logrus.Debugf("uvm::RemovePlan9 Zero ref-count, removing. 
%s id:%s", hostPath, uvm.id) + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + Settings: hcsschema.Plan9Share{ + Name: fmt.Sprintf("%d", uvm.plan9Shares[hostPath].idCounter), + Port: uvm.plan9Shares[hostPath].port, + }, + ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Plan9/Shares"), + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedDirectory, + RequestType: requesttype.Remove, + Settings: guestrequest.LCOWMappedDirectory{ + MountPath: uvm.plan9Shares[hostPath].uvmPath, + Port: uvm.plan9Shares[hostPath].port, + }, + }, + } + if err := uvm.Modify(modification); err != nil { + return fmt.Errorf("failed to remove plan9 share %s from %s: %+v: %s", hostPath, uvm.id, modification, err) + } + delete(uvm.plan9Shares, hostPath) + logrus.Debugf("uvm::RemovePlan9 Success %s id:%s successfully removed from utility VM", hostPath, uvm.id) + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go new file mode 100644 index 00000000..7788878f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go @@ -0,0 +1,318 @@ +package uvm + +import ( + "fmt" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/sirupsen/logrus" +) + +var ( + ErrNoAvailableLocation = fmt.Errorf("no available location") + ErrNotAttached = fmt.Errorf("not attached") + ErrAlreadyAttached = fmt.Errorf("already attached") + ErrNoSCSIControllers = fmt.Errorf("no SCSI controllers configured for this utility VM") + ErrTooManyAttachments = fmt.Errorf("too many SCSI attachments") + ErrSCSILayerWCOWUnsupported = fmt.Errorf("SCSI attached layers are not supported for WCOW") +) + +// allocateSCSI finds the next available slot on the +// SCSI controllers associated with a utility VM to use. +// Lock must be held when calling this function +func (uvm *UtilityVM) allocateSCSI(hostPath string, uvmPath string, isLayer bool) (int, int32, error) { + for controller, luns := range uvm.scsiLocations { + for lun, si := range luns { + if si.hostPath == "" { + uvm.scsiLocations[controller][lun].hostPath = hostPath + uvm.scsiLocations[controller][lun].uvmPath = uvmPath + uvm.scsiLocations[controller][lun].isLayer = isLayer + if isLayer { + uvm.scsiLocations[controller][lun].refCount = 1 + } + logrus.Debugf("uvm::allocateSCSI %d:%d %q %q", controller, lun, hostPath, uvmPath) + return controller, int32(lun), nil + + } + } + } + return -1, -1, ErrNoAvailableLocation +} + +func (uvm *UtilityVM) deallocateSCSI(controller int, lun int32) error { + uvm.m.Lock() + defer uvm.m.Unlock() + logrus.Debugf("uvm::deallocateSCSI %d:%d %+v", controller, lun, uvm.scsiLocations[controller][lun]) + uvm.scsiLocations[controller][lun] = scsiInfo{} + return nil +} + +// Lock must be held when calling this function. +func (uvm *UtilityVM) findSCSIAttachment(findThisHostPath string) (int, int32, string, error) { + for controller, luns := range uvm.scsiLocations { + for lun, si := range luns { + if si.hostPath == findThisHostPath { + logrus.Debugf("uvm::findSCSIAttachment %d:%d %+v", controller, lun, si) + return controller, int32(lun), si.uvmPath, nil + } + } + } + return -1, -1, "", ErrNotAttached +} + +// AddSCSI adds a SCSI disk to a utility VM at the next available location. 
This +// function should be called for a RW/scratch layer or a passthrough vhd/vhdx. +// For read-only layers on LCOW as an alternate to PMEM for large layers, use +// AddSCSILayer instead. +// +// `hostPath` is required and must point to a vhd/vhdx path. +// +// `uvmPath` is optional. +// +// `readOnly` set to `true` if the vhd/vhdx should be attached read only. +func (uvm *UtilityVM) AddSCSI(hostPath string, uvmPath string, readOnly bool) (int, int32, error) { + logrus.WithFields(logrus.Fields{ + logfields.UVMID: uvm.id, + "host-path": hostPath, + "uvm-path": uvmPath, + "readOnly": readOnly, + }).Debug("uvm::AddSCSI") + + return uvm.addSCSIActual(hostPath, uvmPath, "VirtualDisk", false, readOnly) +} + +// AddSCSIPhysicalDisk attaches a physical disk from the host directly to the +// Utility VM at the next available location. +// +// `hostPath` is required and `likely` start's with `\\.\PHYSICALDRIVE`. +// +// `uvmPath` is optional if a guest mount is not requested. +// +// `readOnly` set to `true` if the physical disk should be attached read only. +func (uvm *UtilityVM) AddSCSIPhysicalDisk(hostPath, uvmPath string, readOnly bool) (int, int32, error) { + logrus.WithFields(logrus.Fields{ + logfields.UVMID: uvm.id, + "host-path": hostPath, + "uvm-path": uvmPath, + "readOnly": readOnly, + }).Debug("uvm::AddSCSIPhysicalDisk") + + return uvm.addSCSIActual(hostPath, uvmPath, "PassThru", false, readOnly) +} + +// AddSCSILayer adds a read-only layer disk to a utility VM at the next available +// location. This function is used by LCOW as an alternate to PMEM for large layers. +// The UVMPath will always be /tmp/S/. +func (uvm *UtilityVM) AddSCSILayer(hostPath string) (int, int32, error) { + logrus.WithFields(logrus.Fields{ + logfields.UVMID: uvm.id, + "host-path": hostPath, + }).Debug("uvm::AddSCSILayer") + + if uvm.operatingSystem == "windows" { + return -1, -1, ErrSCSILayerWCOWUnsupported + } + + return uvm.addSCSIActual(hostPath, "", "VirtualDisk", true, true) +} + +// addSCSIActual is the implementation behind the external functions AddSCSI and +// AddSCSILayer. +// +// We are in control of everything ourselves. Hence we have ref- counting and +// so-on tracking what SCSI locations are available or used. +// +// `hostPath` is required and may be a vhd/vhdx or physical disk path. +// +// `uvmPath` is optional, and `must` be empty for layers. If `!isLayer` and +// `uvmPath` is empty no guest modify will take place. +// +// `attachmentType` is required and `must` be `VirtualDisk` for vhd/vhdx +// attachments and `PassThru` for physical disk. +// +// `isLayer` indicates that this is a read-only (LCOW) layer VHD. This parameter +// `must not` be used for Windows. +// +// `readOnly` indicates the attachment should be added read only. +// +// Returns the controller ID (0..3) and LUN (0..63) where the disk is attached. +func (uvm *UtilityVM) addSCSIActual(hostPath, uvmPath, attachmentType string, isLayer, readOnly bool) (int, int32, error) { + if uvm.scsiControllerCount == 0 { + return -1, -1, ErrNoSCSIControllers + } + + // Ensure the utility VM has access + if err := wclayer.GrantVmAccess(uvm.ID(), hostPath); err != nil { + return -1, -1, err + } + + // We must hold the lock throughout the lookup (findSCSIAttachment) until + // after the possible allocation (allocateSCSI) has been completed to ensure + // there isn't a race condition for it being attached by another thread between + // these two operations. All failure paths between these two must release + // the lock. 
+ uvm.m.Lock() + if controller, lun, _, err := uvm.findSCSIAttachment(hostPath); err == nil { + // So is attached + if isLayer { + // Increment the refcount + uvm.scsiLocations[controller][lun].refCount++ + logrus.Debugf("uvm::AddSCSI id:%s hostPath:%s refCount now %d", uvm.id, hostPath, uvm.scsiLocations[controller][lun].refCount) + uvm.m.Unlock() + return controller, int32(lun), nil + } + + uvm.m.Unlock() + return -1, -1, ErrAlreadyAttached + } + + // At this point, we know it's not attached, regardless of whether it's a + // ref-counted layer VHD, or not. + controller, lun, err := uvm.allocateSCSI(hostPath, uvmPath, isLayer) + if err != nil { + uvm.m.Unlock() + return -1, -1, err + } + + // Auto-generate the UVM path for LCOW layers + if isLayer { + uvmPath = fmt.Sprintf("/tmp/S%d/%d", controller, lun) + } + + // See comment higher up. Now safe to release the lock. + uvm.m.Unlock() + + // Note: Can remove this check post-RS5 if multiple controllers are supported + if controller > 0 { + uvm.deallocateSCSI(controller, lun) + return -1, -1, ErrTooManyAttachments + } + + SCSIModification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + Settings: hcsschema.Attachment{ + Path: hostPath, + Type_: attachmentType, + ReadOnly: readOnly, + }, + ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Scsi/%d/Attachments/%d", controller, lun), + } + + if uvmPath != "" { + if uvm.operatingSystem == "windows" { + SCSIModification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedVirtualDisk, + RequestType: requesttype.Add, + Settings: guestrequest.WCOWMappedVirtualDisk{ + ContainerPath: uvmPath, + Lun: lun, + }, + } + } else { + SCSIModification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedVirtualDisk, + RequestType: requesttype.Add, + Settings: guestrequest.LCOWMappedVirtualDisk{ + MountPath: uvmPath, + Lun: uint8(lun), + Controller: uint8(controller), + ReadOnly: readOnly, + }, + } + } + } + + if err := uvm.Modify(SCSIModification); err != nil { + uvm.deallocateSCSI(controller, lun) + return -1, -1, fmt.Errorf("uvm::AddSCSI: failed to modify utility VM configuration: %s", err) + } + logrus.Debugf("uvm::AddSCSI id:%s hostPath:%s added at %d:%d", uvm.id, hostPath, controller, lun) + return controller, int32(lun), nil + +} + +// RemoveSCSI removes a SCSI disk from a utility VM. As an external API, it +// is "safe". Internal use can call removeSCSI. +func (uvm *UtilityVM) RemoveSCSI(hostPath string) error { + uvm.m.Lock() + defer uvm.m.Unlock() + + if uvm.scsiControllerCount == 0 { + return ErrNoSCSIControllers + } + + // Make sure is actually attached + controller, lun, uvmPath, err := uvm.findSCSIAttachment(hostPath) + if err != nil { + return err + } + + if uvm.scsiLocations[controller][lun].isLayer { + uvm.scsiLocations[controller][lun].refCount-- + if uvm.scsiLocations[controller][lun].refCount > 0 { + logrus.Debugf("uvm::RemoveSCSI: refCount now %d: %s %s %d:%d", uvm.scsiLocations[controller][lun].refCount, hostPath, uvm.id, controller, lun) + return nil + } + } + + if err := uvm.removeSCSI(hostPath, uvmPath, controller, lun); err != nil { + return fmt.Errorf("failed to remove SCSI disk %s from container %s: %s", hostPath, uvm.id, err) + + } + return nil +} + +// removeSCSI is the internally callable "unsafe" version of RemoveSCSI. The mutex +// MUST be held when calling this function. 
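AddSCSI above returns the controller and LUN where the disk was attached, and RemoveSCSI is the ref-count-aware inverse for layer attachments. A minimal sketch of attaching a scratch VHDX to an LCOW utility VM and mounting it at a guest path (the host and guest paths are hypothetical, and the code is assumed to live inside the hcsshim module):

package uvmexamples

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/uvm"
)

func addScratch(vm *uvm.UtilityVM) {
	const hostPath = `C:\lcow\scratch\sandbox.vhdx` // hypothetical
	const guestPath = "/run/scratch"                // hypothetical

	controller, lun, err := vm.AddSCSI(hostPath, guestPath, false /* readOnly */)
	if err != nil {
		log.Fatalf("AddSCSI: %v", err)
	}
	log.Printf("attached at SCSI %d:%d", controller, lun)

	// Detach. For plain (non-layer) attachments this removes the device
	// immediately rather than decrementing a ref count.
	if err := vm.RemoveSCSI(hostPath); err != nil {
		log.Fatalf("RemoveSCSI: %v", err)
	}
}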
+func (uvm *UtilityVM) removeSCSI(hostPath string, uvmPath string, controller int, lun int32) error { + logrus.Debugf("uvm::RemoveSCSI id:%s hostPath:%s", uvm.id, hostPath) + scsiModification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Scsi/%d/Attachments/%d", controller, lun), + } + + // Include the GuestRequest so that the GCS ejects the disk cleanly if the disk was attached/mounted + if uvmPath != "" { + if uvm.operatingSystem == "windows" { + scsiModification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedVirtualDisk, + RequestType: requesttype.Remove, + Settings: guestrequest.WCOWMappedVirtualDisk{ + ContainerPath: uvmPath, + Lun: lun, + }, + } + } else { + scsiModification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeMappedVirtualDisk, + RequestType: requesttype.Remove, + Settings: guestrequest.LCOWMappedVirtualDisk{ + MountPath: uvmPath, // May be blank in attach-only + Lun: uint8(lun), + Controller: uint8(controller), + }, + } + } + } + + if err := uvm.Modify(scsiModification); err != nil { + return err + } + uvm.scsiLocations[controller][lun] = scsiInfo{} + logrus.Debugf("uvm::RemoveSCSI: Success %s removed from %s %d:%d", hostPath, uvm.id, controller, lun) + return nil +} + +// GetScsiUvmPath returns the guest mounted path of a SCSI drive. +// +// If `hostPath` is not mounted returns `ErrNotAttached`. +func (uvm *UtilityVM) GetScsiUvmPath(hostPath string) (string, error) { + uvm.m.Lock() + defer uvm.m.Unlock() + + _, _, uvmPath, err := uvm.findSCSIAttachment(hostPath) + return uvmPath, err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go new file mode 100644 index 00000000..93549308 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go @@ -0,0 +1,98 @@ +package uvm + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "net" + "syscall" + + "github.com/sirupsen/logrus" +) + +const _ERROR_CONNECTION_ABORTED syscall.Errno = 1236 + +var _ = (OutputHandler)(parseLogrus) + +func parseLogrus(r io.Reader) { + j := json.NewDecoder(r) + logger := logrus.StandardLogger() + for { + e := logrus.Entry{Logger: logger} + err := j.Decode(&e.Data) + if err == io.EOF || err == _ERROR_CONNECTION_ABORTED { + break + } + if err != nil { + // Something went wrong. Read the rest of the data as a single + // string and log it at once -- it's probably a GCS panic stack. 
+ logrus.Error("gcs log read: ", err) + rest, _ := ioutil.ReadAll(io.MultiReader(j.Buffered(), r)) + if len(rest) != 0 { + logrus.Error("gcs stderr: ", string(rest)) + } + break + } + msg := e.Data["msg"] + delete(e.Data, "msg") + lvl := e.Data["level"] + delete(e.Data, "level") + e.Data["vm.time"] = e.Data["time"] + delete(e.Data, "time") + switch lvl { + case "debug": + e.Debug(msg) + case "info": + e.Info(msg) + case "warning": + e.Warning(msg) + case "error", "fatal": + e.Error(msg) + default: + e.Info(msg) + } + } +} + +type acceptResult struct { + c net.Conn + err error +} + +func processOutput(ctx context.Context, l net.Listener, doneChan chan struct{}, handler OutputHandler) { + defer close(doneChan) + + ch := make(chan acceptResult) + go func() { + c, err := l.Accept() + ch <- acceptResult{c, err} + }() + + select { + case <-ctx.Done(): + l.Close() + return + case ar := <-ch: + c, err := ar.c, ar.err + l.Close() + if err != nil { + logrus.Error("accepting log socket: ", err) + return + } + defer c.Close() + + handler(c) + } +} + +// Start synchronously starts the utility VM. +func (uvm *UtilityVM) Start() error { + if uvm.outputListener != nil { + ctx, cancel := context.WithCancel(context.Background()) + go processOutput(ctx, uvm.outputListener, uvm.outputProcessingDone, uvm.outputHandler) + uvm.outputProcessingCancel = cancel + uvm.outputListener = nil + } + return uvm.hcsSystem.Start() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/system.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/system.go new file mode 100644 index 00000000..f82e1c4f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/system.go @@ -0,0 +1,7 @@ +package uvm + +import "github.com/Microsoft/hcsshim/internal/hcs" + +func (uvm *UtilityVM) ComputeSystem() *hcs.System { + return uvm.hcsSystem +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/terminate.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/terminate.go new file mode 100644 index 00000000..36f0934f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/terminate.go @@ -0,0 +1,7 @@ +package uvm + +// Terminate requests a utility VM terminate. If IsPending() on the error returned is true, +// it may not actually be shut down until Wait() succeeds. +func (uvm *UtilityVM) Terminate() error { + return uvm.hcsSystem.Terminate() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go new file mode 100644 index 00000000..2ee16788 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go @@ -0,0 +1,105 @@ +package uvm + +// This package describes the external interface for utility VMs. + +import ( + "context" + "net" + "sync" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hns" +) + +// | WCOW | LCOW +// Container scratch | SCSI | SCSI +// Scratch space | ---- | SCSI // For file system utilities. /tmp/scratch +// Read-Only Layer | VSMB | VPMEM +// Mapped Directory | VSMB | PLAN9 + +// vsmbShare is an internal structure used for ref-counting VSMB shares mapped to a Windows utility VM. +type vsmbShare struct { + refCount uint32 + name string + guestRequest interface{} +} + +// scsiInfo is an internal structure used for determining what is mapped to a utility VM. +// hostPath is required. uvmPath may be blank. 
+type scsiInfo struct { + hostPath string + uvmPath string + + // While most VHDs attached to SCSI are scratch spaces, in the case of LCOW + // when the size is over the size possible to attach to PMEM, we use SCSI for + // read-only layers. As RO layers are shared, we perform ref-counting. + isLayer bool + refCount uint32 +} + +// vpmemInfo is an internal structure used for determining VPMem devices mapped to +// a Linux utility VM. +type vpmemInfo struct { + hostPath string + uvmPath string + refCount uint32 +} + +// plan9Info is an internal structure used for ref-counting Plan9 shares mapped to a Linux utility VM. +type plan9Info struct { + refCount uint32 + idCounter uint64 + uvmPath string + port int32 // Temporary. TODO Remove +} +type nicInfo struct { + ID guid.GUID + Endpoint *hns.HNSEndpoint +} + +type namespaceInfo struct { + nics []nicInfo + refCount int +} + +// UtilityVM is the object used by clients representing a utility VM +type UtilityVM struct { + id string // Identifier for the utility VM (user supplied or generated) + owner string // Owner for the utility VM (user supplied or generated) + operatingSystem string // "windows" or "linux" + hcsSystem *hcs.System // The handle to the compute system + m sync.Mutex // Lock for adding/removing devices + + // containerCounter is the current number of containers that have been + // created. This is never decremented in the life of the UVM. + // + // NOTE: All accesses to this MUST be done atomically. + containerCounter uint64 + + // VSMB shares that are mapped into a Windows UVM. These are used for read-only + // layers and mapped directories + vsmbShares map[string]*vsmbShare + vsmbCounter uint64 // Counter to generate a unique share name for each VSMB share. + + // VPMEM devices that are mapped into a Linux UVM. These are used for read-only layers, or for + // booting from VHD. + vpmemDevices [MaxVPMEMCount]vpmemInfo // Limited by ACPI size. + vpmemMaxCount uint32 // Actual number of VPMem devices + vpmemMaxSizeBytes uint64 // Actual size of VPMem devices + + // SCSI devices that are mapped into a Windows or Linux utility VM + scsiLocations [4][64]scsiInfo // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though. + scsiControllerCount uint32 // Number of SCSI controllers in the utility VM + + // Plan9 are directories mapped into a Linux utility VM + plan9Shares map[string]*plan9Info + plan9Counter uint64 // Each newly-added plan9 share has a counter used as its ID in the ResourceURI and for the name + + namespaces map[string]*namespaceInfo + + outputListener net.Listener + outputProcessingDone chan struct{} + outputHandler OutputHandler + outputProcessingCancel context.CancelFunc +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go new file mode 100644 index 00000000..13f59ce8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go @@ -0,0 +1,170 @@ +package uvm + +import ( + "fmt" + + "github.com/Microsoft/hcsshim/internal/guestrequest" + "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/sirupsen/logrus" +) + +// allocateVPMEM finds the next available VPMem slot. The lock MUST be held +// when calling this function. 
+func (uvm *UtilityVM) allocateVPMEM(hostPath string) (uint32, error) { + for index, vi := range uvm.vpmemDevices { + if vi.hostPath == "" { + vi.hostPath = hostPath + logrus.Debugf("uvm::allocateVPMEM %d %q", index, hostPath) + return uint32(index), nil + } + } + return 0, fmt.Errorf("no free VPMEM locations") +} + +func (uvm *UtilityVM) deallocateVPMEM(deviceNumber uint32) error { + uvm.m.Lock() + defer uvm.m.Unlock() + uvm.vpmemDevices[deviceNumber] = vpmemInfo{} + return nil +} + +// Lock must be held when calling this function +func (uvm *UtilityVM) findVPMEMDevice(findThisHostPath string) (uint32, string, error) { + for deviceNumber, vi := range uvm.vpmemDevices { + if vi.hostPath == findThisHostPath { + logrus.Debugf("uvm::findVPMEMDeviceNumber %d %s", deviceNumber, findThisHostPath) + return uint32(deviceNumber), vi.uvmPath, nil + } + } + return 0, "", fmt.Errorf("%s is not attached to VPMEM", findThisHostPath) +} + +// AddVPMEM adds a VPMEM disk to a utility VM at the next available location. +// +// Returns the location(0..MaxVPMEM-1) where the device is attached, and if exposed, +// the utility VM path which will be /tmp/p// +func (uvm *UtilityVM) AddVPMEM(hostPath string, expose bool) (uint32, string, error) { + if uvm.operatingSystem != "linux" { + return 0, "", errNotSupported + } + + logrus.Debugf("uvm::AddVPMEM id:%s hostPath:%s expose:%t", uvm.id, hostPath, expose) + + uvm.m.Lock() + defer uvm.m.Unlock() + + var deviceNumber uint32 + var err error + uvmPath := "" + + deviceNumber, uvmPath, err = uvm.findVPMEMDevice(hostPath) + if err != nil { + // Ensure the utility VM has access + if err := wclayer.GrantVmAccess(uvm.ID(), hostPath); err != nil { + return 0, "", err + } + + // It doesn't exist, so we're going to allocate and hot-add it + deviceNumber, err = uvm.allocateVPMEM(hostPath) + if err != nil { + return 0, "", err + } + + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + Settings: hcsschema.VirtualPMemDevice{ + HostPath: hostPath, + ReadOnly: true, + ImageFormat: "Vhd1", + }, + ResourcePath: fmt.Sprintf("VirtualMachine/Devices/VirtualPMem/Devices/%d", deviceNumber), + } + + if expose { + uvmPath = fmt.Sprintf("/tmp/p%d", deviceNumber) + modification.GuestRequest = guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeVPMemDevice, + RequestType: requesttype.Add, + Settings: guestrequest.LCOWMappedVPMemDevice{ + DeviceNumber: deviceNumber, + MountPath: uvmPath, + }, + } + } + + if err := uvm.Modify(modification); err != nil { + uvm.vpmemDevices[deviceNumber] = vpmemInfo{} + return 0, "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err) + } + + uvm.vpmemDevices[deviceNumber] = vpmemInfo{ + hostPath: hostPath, + refCount: 1, + uvmPath: uvmPath} + } else { + pmemi := vpmemInfo{ + hostPath: hostPath, + refCount: uvm.vpmemDevices[deviceNumber].refCount + 1, + uvmPath: uvmPath} + uvm.vpmemDevices[deviceNumber] = pmemi + } + logrus.Debugf("hcsshim::AddVPMEM id:%s Success %+v", uvm.id, uvm.vpmemDevices[deviceNumber]) + return deviceNumber, uvmPath, nil + +} + +// RemoveVPMEM removes a VPMEM disk from a utility VM. As an external API, it +// is "safe". Internal use can call removeVPMEM. 
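AddVPMEM above is also ref-counted per host path and, when expose is true, mounts the device in the guest at /tmp/p<device>. A minimal sketch of attaching a read-only layer VHD over VPMem and releasing it again (the layer path is hypothetical, and the code is assumed to live inside the hcsshim module):

package uvmexamples

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/uvm"
)

func addLayerVPMem(vm *uvm.UtilityVM) {
	const layerVHD = `C:\lcow\layers\layer1\layer.vhd` // hypothetical

	device, guestPath, err := vm.AddVPMEM(layerVHD, true /* expose in guest */)
	if err != nil {
		log.Fatalf("AddVPMEM: %v", err)
	}
	log.Printf("layer attached as VPMem device %d, mounted at %s", device, guestPath)

	// Balanced removal: the device is only hot-removed when its ref count
	// drops to zero.
	if err := vm.RemoveVPMEM(layerVHD); err != nil {
		log.Fatalf("RemoveVPMEM: %v", err)
	}
}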
+func (uvm *UtilityVM) RemoveVPMEM(hostPath string) error { + if uvm.operatingSystem != "linux" { + return errNotSupported + } + + uvm.m.Lock() + defer uvm.m.Unlock() + + // Make sure is actually attached + deviceNumber, uvmPath, err := uvm.findVPMEMDevice(hostPath) + if err != nil { + return fmt.Errorf("cannot remove VPMEM %s as it is not attached to utility VM %s: %s", hostPath, uvm.id, err) + } + + if err := uvm.removeVPMEM(hostPath, uvmPath, deviceNumber); err != nil { + return fmt.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err) + } + return nil +} + +// removeVPMEM is the internally callable "unsafe" version of RemoveVPMEM. The mutex +// MUST be held when calling this function. +func (uvm *UtilityVM) removeVPMEM(hostPath string, uvmPath string, deviceNumber uint32) error { + logrus.Debugf("uvm::RemoveVPMEM id:%s hostPath:%s device:%d", uvm.id, hostPath, deviceNumber) + + if uvm.vpmemDevices[deviceNumber].refCount == 1 { + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + ResourcePath: fmt.Sprintf("VirtualMachine/Devices/VirtualPMem/Devices/%d", deviceNumber), + GuestRequest: guestrequest.GuestRequest{ + ResourceType: guestrequest.ResourceTypeVPMemDevice, + RequestType: requesttype.Remove, + Settings: guestrequest.LCOWMappedVPMemDevice{ + DeviceNumber: deviceNumber, + MountPath: uvmPath, + }, + }, + } + + if err := uvm.Modify(modification); err != nil { + return err + } + uvm.vpmemDevices[deviceNumber] = vpmemInfo{} + logrus.Debugf("uvm::RemoveVPMEM: Success id:%s hostPath:%s device:%d", uvm.id, hostPath, deviceNumber) + return nil + } + uvm.vpmemDevices[deviceNumber].refCount-- + logrus.Debugf("uvm::RemoveVPMEM: Success id:%s hostPath:%s device:%d refCount:%d", uvm.id, hostPath, deviceNumber, uvm.vpmemDevices[deviceNumber].refCount) + return nil + +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go new file mode 100644 index 00000000..e8fcae6b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go @@ -0,0 +1,112 @@ +package uvm + +import ( + "fmt" + "strconv" + + "github.com/Microsoft/hcsshim/internal/requesttype" + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/sirupsen/logrus" +) + +// findVSMBShare finds a share by `hostPath`. If not found returns `ErrNotAttached`. +func (uvm *UtilityVM) findVSMBShare(hostPath string) (*vsmbShare, error) { + share, ok := uvm.vsmbShares[hostPath] + if !ok { + return nil, ErrNotAttached + } + return share, nil +} + +func (share *vsmbShare) GuestPath() string { + return `\\?\VMSMB\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\` + share.name +} + +// AddVSMB adds a VSMB share to a Windows utility VM. Each VSMB share is ref-counted and +// only added if it isn't already. This is used for read-only layers, mapped directories +// to a container, and for mapped pipes. 
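The VSMB helpers in this file follow the same ref-counting pattern. A caller-side sketch is shown below; mapDirectory is an illustrative name, and the ReadOnly option field is assumed from the schema2 package rather than taken from this file.

// mapDirectory shares a host directory into a WCOW utility VM over VSMB,
// resolves the in-guest path, and releases the share when fn returns.
// Hypothetical helper, not part of the vendored package.
func mapDirectory(vm *uvm.UtilityVM, hostPath string, fn func(guestPath string) error) error {
	opts := &hcsschema.VirtualSmbShareOptions{ReadOnly: true} // field name assumed from schema2
	if err := vm.AddVSMB(hostPath, nil, opts); err != nil {
		return err
	}
	// RemoveVSMB only detaches the share once every AddVSMB has been balanced.
	defer func() { _ = vm.RemoveVSMB(hostPath) }()

	guestPath, err := vm.GetVSMBUvmPath(hostPath)
	if err != nil {
		return err
	}
	return fn(guestPath)
}
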
+func (uvm *UtilityVM) AddVSMB(hostPath string, guestRequest interface{}, options *hcsschema.VirtualSmbShareOptions) error { + if uvm.operatingSystem != "windows" { + return errNotSupported + } + + logrus.Debugf("uvm::AddVSMB %s %+v %+v id:%s", hostPath, guestRequest, options, uvm.id) + uvm.m.Lock() + defer uvm.m.Unlock() + share, err := uvm.findVSMBShare(hostPath) + if err == ErrNotAttached { + uvm.vsmbCounter++ + shareName := "s" + strconv.FormatUint(uvm.vsmbCounter, 16) + + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Add, + Settings: hcsschema.VirtualSmbShare{ + Name: shareName, + Options: options, + Path: hostPath, + }, + ResourcePath: "VirtualMachine/Devices/VirtualSmb/Shares", + } + + if err := uvm.Modify(modification); err != nil { + return err + } + share = &vsmbShare{ + name: shareName, + guestRequest: guestRequest, + } + uvm.vsmbShares[hostPath] = share + } + share.refCount++ + logrus.Debugf("hcsshim::AddVSMB Success %s: refcount=%d %+v", hostPath, share.refCount, share) + return nil +} + +// RemoveVSMB removes a VSMB share from a utility VM. Each VSMB share is ref-counted +// and only actually removed when the ref-count drops to zero. +func (uvm *UtilityVM) RemoveVSMB(hostPath string) error { + if uvm.operatingSystem != "windows" { + return errNotSupported + } + logrus.Debugf("uvm::RemoveVSMB %s id:%s", hostPath, uvm.id) + uvm.m.Lock() + defer uvm.m.Unlock() + share, err := uvm.findVSMBShare(hostPath) + if err != nil { + return fmt.Errorf("%s is not present as a VSMB share in %s, cannot remove", hostPath, uvm.id) + } + + share.refCount-- + if share.refCount > 0 { + logrus.Debugf("uvm::RemoveVSMB Success %s id:%s Ref-count now %d. It is still present in the utility VM", hostPath, uvm.id, share.refCount) + return nil + } + logrus.Debugf("uvm::RemoveVSMB Zero ref-count, removing. %s id:%s", hostPath, uvm.id) + modification := &hcsschema.ModifySettingRequest{ + RequestType: requesttype.Remove, + Settings: hcsschema.VirtualSmbShare{Name: share.name}, + ResourcePath: "VirtualMachine/Devices/VirtualSmb/Shares", + } + if err := uvm.Modify(modification); err != nil { + return fmt.Errorf("failed to remove vsmb share %s from %s: %+v: %s", hostPath, uvm.id, modification, err) + } + logrus.Debugf("uvm::RemoveVSMB Success %s id:%s successfully removed from utility VM", hostPath, uvm.id) + delete(uvm.vsmbShares, hostPath) + return nil +} + +// GetVSMBUvmPath returns the guest path of a VSMB mount. +func (uvm *UtilityVM) GetVSMBUvmPath(hostPath string) (string, error) { + if hostPath == "" { + return "", fmt.Errorf("no hostPath passed to GetVSMBUvmPath") + } + uvm.m.Lock() + defer uvm.m.Unlock() + share, err := uvm.findVSMBShare(hostPath) + if err != nil { + return "", err + } + path := share.GuestPath() + logrus.Debugf("uvm::GetVSMBUvmPath Success %s id:%s path:%s", hostPath, uvm.id, path) + return path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go b/vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go new file mode 100644 index 00000000..aee387a8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go @@ -0,0 +1,46 @@ +package uvm + +import ( + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/sirupsen/logrus" +) + +func (uvm *UtilityVM) waitForOutput() { + logrus.WithField(logfields.UVMID, uvm.ID()). 
+ Debug("UVM exited, waiting for output processing to complete") + if uvm.outputProcessingDone != nil { + <-uvm.outputProcessingDone + } +} + +// Waits synchronously waits for a utility VM to terminate. +func (uvm *UtilityVM) Wait() error { + err := uvm.hcsSystem.Wait() + + // outputProcessingCancel will only cancel waiting for the vsockexec + // connection, it won't stop output processing once the connection is + // established. + if uvm.outputProcessingCancel != nil { + uvm.outputProcessingCancel() + } + uvm.waitForOutput() + + return err +} + +// WaitExpectedError synchronously waits for a utility VM to terminate. If the +// UVM terminates successfully, or if the given error is encountered internally +// during the wait, this function returns nil. +func (uvm *UtilityVM) WaitExpectedError(expected error) error { + err := uvm.hcsSystem.WaitExpectedError(expected) + + // outputProcessingCancel will only cancel waiting for the vsockexec + // connection, it won't stop output processing once the connection is + // established. + if uvm.outputProcessingCancel != nil { + uvm.outputProcessingCancel() + } + uvm.waitForOutput() + + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/locate.go b/vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/locate.go new file mode 100644 index 00000000..654fab69 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/locate.go @@ -0,0 +1,35 @@ +package uvmfolder + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/sirupsen/logrus" +) + +// LocateUVMFolder searches a set of layer folders to determine the "uppermost" +// layer which has a utility VM image. The order of the layers is (for historical) reasons +// Read-only-layers followed by an optional read-write layer. The RO layers are in reverse +// order so that the upper-most RO layer is at the start, and the base OS layer is the +// end. +func LocateUVMFolder(layerFolders []string) (string, error) { + var uvmFolder string + index := 0 + for _, layerFolder := range layerFolders { + _, err := os.Stat(filepath.Join(layerFolder, `UtilityVM`)) + if err == nil { + uvmFolder = layerFolder + break + } + if !os.IsNotExist(err) { + return "", err + } + index++ + } + if uvmFolder == "" { + return "", fmt.Errorf("utility VM folder could not be found in layers") + } + logrus.Debugf("hcsshim::LocateUVMFolder At %d of %d: %s", index+1, len(layerFolders), uvmFolder) + return uvmFolder, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go new file mode 100644 index 00000000..dcb91926 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -0,0 +1,32 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// ActivateLayer will find the layer with the given id and mount it's filesystem. +// For a read/write layer, the mounted filesystem will appear as a volume on the +// host, while a read-only layer is generally expected to be a no-op. +// An activated layer must later be deactivated via DeactivateLayer. 
+func ActivateLayer(path string) (err error) { + title := "hcsshim::ActivateLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = activateLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go new file mode 100644 index 00000000..5784241d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go @@ -0,0 +1,173 @@ +package wclayer + +import ( + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/safefile" +) + +type baseLayerWriter struct { + root *os.File + f *os.File + bw *winio.BackupFileWriter + err error + hasUtilityVM bool + dirInfo []dirInfo +} + +type dirInfo struct { + path string + fileInfo winio.FileBasicInfo +} + +// reapplyDirectoryTimes reapplies directory modification, creation, etc. times +// after processing of the directory tree has completed. The times are expected +// to be ordered such that parent directories come before child directories. +func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { + for i := range dis { + di := &dis[len(dis)-i-1] // reverse order: process child directories first + f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_OPEN, safefile.FILE_DIRECTORY_FILE) + if err != nil { + return err + } + + err = winio.SetFileBasicInfo(f, &di.fileInfo) + f.Close() + if err != nil { + return err + } + } + return nil +} + +func (w *baseLayerWriter) closeCurrentFile() error { + if w.f != nil { + err := w.bw.Close() + err2 := w.f.Close() + w.f = nil + w.bw = nil + if err != nil { + return err + } + if err2 != nil { + return err2 + } + } + return nil +} + +func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + if filepath.ToSlash(name) == `UtilityVM/Files` { + w.hasUtilityVM = true + } + + var f *os.File + defer func() { + if f != nil { + f.Close() + } + }() + + extraFlags := uint32(0) + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + extraFlags |= safefile.FILE_DIRECTORY_FILE + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) + } + } + + mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) + f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, extraFlags) + if err != nil { + return hcserror.New(err, "Failed to safefile.OpenRelative", name) + } + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return hcserror.New(err, "Failed to SetFileBasicInfo", name) + } + + w.f = f + w.bw = winio.NewBackupFileWriter(f, true) + f = nil + return nil +} + +func (w *baseLayerWriter) AddLink(name string, target string) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = 
w.closeCurrentFile() + if err != nil { + return err + } + + return safefile.LinkRelative(target, w.root, name, w.root) +} + +func (w *baseLayerWriter) Remove(name string) error { + return errors.New("base layer cannot have tombstones") +} + +func (w *baseLayerWriter) Write(b []byte) (int, error) { + n, err := w.bw.Write(b) + if err != nil { + w.err = err + } + return n, err +} + +func (w *baseLayerWriter) Close() error { + defer func() { + w.root.Close() + w.root = nil + }() + err := w.closeCurrentFile() + if err != nil { + return err + } + if w.err == nil { + // Restore the file times of all the directories, since they may have + // been modified by creating child directories. + err = reapplyDirectoryTimes(w.root, w.dirInfo) + if err != nil { + return err + } + + err = ProcessBaseLayer(w.root.Name()) + if err != nil { + return err + } + + if w.hasUtilityVM { + err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root) + if err != nil { + return err + } + err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM")) + if err != nil { + return err + } + } + } + return w.err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go new file mode 100644 index 00000000..be2bc3fd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -0,0 +1,31 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// CreateLayer creates a new, empty, read-only layer on the filesystem based on +// the parent layer provided. +func CreateLayer(path, parent string) (err error) { + title := "hcsshim::CreateLayer" + fields := logrus.Fields{ + "parent": parent, + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = createLayer(&stdDriverInfo, path, parent) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go new file mode 100644 index 00000000..7e335128 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -0,0 +1,38 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// CreateScratchLayer creates and populates new read-write layer for use by a container. +// This requires both the id of the direct parent layer, as well as the full list +// of paths to all parent layers up to the base (and including the direct parent +// whose id was provided). 
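The two creation helpers in this package take different parent information: CreateLayer above derives a new read-only layer from a single parent, while CreateScratchLayer below needs the whole read-only chain ordered from the immediate parent down to the base. A small sketch; createLayers and the path arguments are placeholders.

// createLayers contrasts the two helpers: a read-only child from one parent,
// then a scratch layer built from the full chain. Hypothetical, for illustration.
func createLayers(newROLayer, parent, scratchPath string, roChain []string) error {
	if err := wclayer.CreateLayer(newROLayer, parent); err != nil {
		return err
	}
	// roChain is ordered immediate parent first, base layer last.
	return wclayer.CreateScratchLayer(scratchPath, roChain)
}
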
+func CreateScratchLayer(path string, parentLayerPaths []string) (err error) { + title := "hcsshim::CreateScratchLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(parentLayerPaths) + if err != nil { + return err + } + + err = createSandboxLayer(&stdDriverInfo, path, 0, layers) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go new file mode 100644 index 00000000..2dd5d571 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go @@ -0,0 +1,29 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// DeactivateLayer will dismount a layer that was mounted via ActivateLayer. +func DeactivateLayer(path string) (err error) { + title := "hcsshim::DeactivateLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = deactivateLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title+"- failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go new file mode 100644 index 00000000..4da690c2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -0,0 +1,30 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// DestroyLayer will remove the on-disk files representing the layer with the given +// path, including that layer's containing folder, if any. +func DestroyLayer(path string) (err error) { + title := "hcsshim::DestroyLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = destroyLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go new file mode 100644 index 00000000..651676fb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -0,0 +1,30 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// ExpandScratchSize expands the size of a layer to at least size bytes. 
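ExpandScratchSize below takes an absolute size in bytes. A one-line caller sketch, with the 40 GiB figure chosen purely for illustration:

// growScratch grows the scratch layer at scratchPath to at least 40 GiB.
// Hypothetical caller; the size is illustrative only.
func growScratch(scratchPath string) error {
	const fortyGiB uint64 = 40 * 1024 * 1024 * 1024
	return wclayer.ExpandScratchSize(scratchPath, fortyGiB)
}
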
+func ExpandScratchSize(path string, size uint64) (err error) { + title := "hcsshim::ExpandScratchSize" + fields := logrus.Fields{ + "path": path, + "size": size, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = expandSandboxSize(&stdDriverInfo, path, size) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go new file mode 100644 index 00000000..0425b339 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -0,0 +1,76 @@ +package wclayer + +import ( + "io/ioutil" + "os" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// ExportLayer will create a folder at exportFolderPath and fill that folder with +// the transport format version of the layer identified by layerId. This transport +// format includes any metadata required for later importing the layer (using +// ImportLayer), and requires the full list of parent layer paths in order to +// perform the export. +func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ExportLayer" + fields := logrus.Fields{ + "path": path, + "exportFolderPath": exportFolderPath, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(parentLayerPaths) + if err != nil { + return err + } + + err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} + +type LayerReader interface { + Next() (string, int64, *winio.FileBasicInfo, error) + Read(b []byte) (int, error) + Close() error +} + +// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. +// The caller must have taken the SeBackupPrivilege privilege +// to call this and any methods on the resulting LayerReader. 
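The SeBackupPrivilege requirement noted above is typically satisfied with go-winio's privilege helpers. The sketch below enumerates a layer's contents; winio.RunWithPrivilege and winio.SeBackupPrivilege are go-winio names the sketch assumes, listLayerFiles is a placeholder, and it relies on the io and io/ioutil standard packages.

// listLayerFiles walks every entry in a layer's transport-format export,
// draining each entry's Win32 backup stream. Hypothetical caller.
func listLayerFiles(layerPath string, parentLayerPaths []string) ([]string, error) {
	var names []string
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		r, err := wclayer.NewLayerReader(layerPath, parentLayerPaths)
		if err != nil {
			return err
		}
		defer r.Close()
		for {
			name, _, _, err := r.Next()
			if err == io.EOF {
				return nil // end of the layer
			}
			if err != nil {
				return err
			}
			names = append(names, name)
			// Each entry's data is a backup stream; discard it here.
			if _, err := io.Copy(ioutil.Discard, r); err != nil {
				return err
			}
		}
	})
	return names, err
}
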
+func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) { + exportPath, err := ioutil.TempDir("", "hcs") + if err != nil { + return nil, err + } + err = ExportLayer(path, exportPath, parentLayerPaths) + if err != nil { + os.RemoveAll(exportPath) + return nil, err + } + return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil +} + +type legacyLayerReaderWrapper struct { + *legacyLayerReader +} + +func (r *legacyLayerReaderWrapper) Close() error { + err := r.legacyLayerReader.Close() + os.RemoveAll(r.root) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go new file mode 100644 index 00000000..d60b6ed5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -0,0 +1,56 @@ +package wclayer + +import ( + "syscall" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// GetLayerMountPath will look for a mounted layer with the given path and return +// the path at which that layer can be accessed. This path may be a volume path +// if the layer is a mounted read-write layer, otherwise it is expected to be the +// folder path at which the layer is stored. +func GetLayerMountPath(path string) (_ string, err error) { + title := "hcsshim::GetLayerMountPath" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + var mountPathLength uintptr + mountPathLength = 0 + + // Call the procedure itself. + logrus.WithFields(fields).Debug("Calling proc (1)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) + if err != nil { + return "", hcserror.New(err, title+" - failed", "(first call)") + } + + // Allocate a mount path of the returned length. + if mountPathLength == 0 { + return "", nil + } + mountPathp := make([]uint16, mountPathLength) + mountPathp[0] = 0 + + // Call the procedure again + logrus.WithFields(fields).Debug("Calling proc (2)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) + if err != nil { + return "", hcserror.New(err, title+" - failed", "(second call)") + } + + mountPath := syscall.UTF16ToString(mountPathp[0:]) + fields["mountPath"] = mountPath + return mountPath, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go new file mode 100644 index 00000000..dbd83ef2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -0,0 +1,29 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// GetSharedBaseImages will enumerate the images stored in the common central +// image store and return descriptive info about those images for the purpose +// of registering them with the graphdriver, graph, and tagstore. 
+func GetSharedBaseImages() (imageData string, err error) { + title := "hcsshim::GetSharedBaseImages" + logrus.Debug(title) + defer func() { + if err != nil { + logrus.WithError(err).Error(err) + } else { + logrus.WithField("imageData", imageData).Debug(title + " - succeeded") + } + }() + + var buffer *uint16 + err = getBaseImages(&buffer) + if err != nil { + return "", hcserror.New(err, title+" - failed", "") + } + return interop.ConvertAndFreeCoTaskMemString(buffer), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go new file mode 100644 index 00000000..05735df6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -0,0 +1,30 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// GrantVmAccess adds access to a file for a given VM +func GrantVmAccess(vmid string, filepath string) (err error) { + title := "hcsshim::GrantVmAccess" + fields := logrus.Fields{ + "vm-id": vmid, + "path": filepath, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = grantVmAccess(vmid, filepath) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go new file mode 100644 index 00000000..76a804f2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -0,0 +1,135 @@ +package wclayer + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/sirupsen/logrus" +) + +// ImportLayer will take the contents of the folder at importFolderPath and import +// that into a layer with the id layerId. Note that in order to correctly populate +// the layer and interperet the transport format, all parent layers must already +// be present on the system at the paths provided in parentLayerPaths. +func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ImportLayer" + fields := logrus.Fields{ + "path": path, + "importFolderPath": importFolderPath, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(parentLayerPaths) + if err != nil { + return err + } + + err = importLayer(&stdDriverInfo, path, importFolderPath, layers) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} + +// LayerWriter is an interface that supports writing a new container image layer. +type LayerWriter interface { + // Add adds a file to the layer with given metadata. + Add(name string, fileInfo *winio.FileBasicInfo) error + // AddLink adds a hard link to the layer. The target must already have been added. + AddLink(name string, target string) error + // Remove removes a file that was present in a parent layer from the layer. 
+ Remove(name string) error + // Write writes data to the current file. The data must be in the format of a Win32 + // backup stream. + Write(b []byte) (int, error) + // Close finishes the layer writing process and releases any resources. + Close() error +} + +type legacyLayerWriterWrapper struct { + *legacyLayerWriter + path string + parentLayerPaths []string +} + +func (r *legacyLayerWriterWrapper) Close() error { + defer os.RemoveAll(r.root.Name()) + defer r.legacyLayerWriter.CloseRoots() + err := r.legacyLayerWriter.Close() + if err != nil { + return err + } + + if err = ImportLayer(r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { + return err + } + for _, name := range r.Tombstones { + if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { + return err + } + } + // Add any hard links that were collected. + for _, lnk := range r.PendingLinks { + if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { + return err + } + if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { + return err + } + } + // Prepare the utility VM for use if one is present in the layer. + if r.HasUtilityVM { + err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) + if err != nil { + return err + } + err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM")) + if err != nil { + return err + } + } + return nil +} + +// NewLayerWriter returns a new layer writer for creating a layer on disk. +// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges +// to call this and any methods on the resulting LayerWriter. +func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) { + if len(parentLayerPaths) == 0 { + // This is a base layer. It gets imported differently. + f, err := safefile.OpenRoot(path) + if err != nil { + return nil, err + } + return &baseLayerWriter{ + root: f, + }, nil + } + + importPath, err := ioutil.TempDir("", "hcs") + if err != nil { + return nil, err + } + w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) + if err != nil { + return nil, err + } + return &legacyLayerWriterWrapper{ + legacyLayerWriter: w, + path: importPath, + parentLayerPaths: parentLayerPaths, + }, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go new file mode 100644 index 00000000..258167a5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -0,0 +1,33 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// LayerExists will return true if a layer with the given id exists and is known +// to the system. +func LayerExists(path string) (_ bool, err error) { + title := "hcsshim::LayerExists" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + // Call the procedure itself. 
+ var exists uint32 + err = layerExists(&stdDriverInfo, path, &exists) + if err != nil { + return false, hcserror.New(err, title+" - failed", "") + } + fields["layer-exists"] = exists != 0 + return exists != 0, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go new file mode 100644 index 00000000..90df3bed --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go @@ -0,0 +1,13 @@ +package wclayer + +import ( + "path/filepath" + + "github.com/Microsoft/hcsshim/internal/guid" +) + +// LayerID returns the layer ID of a layer on disk. +func LayerID(path string) (guid.GUID, error) { + _, file := filepath.Split(path) + return NameToGuid(file) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go new file mode 100644 index 00000000..6d0ae8a0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -0,0 +1,96 @@ +package wclayer + +// This file contains utility functions to support storage (graph) related +// functionality. + +import ( + "syscall" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/sirupsen/logrus" +) + +/* To pass into syscall, we need a struct matching the following: +enum GraphDriverType +{ + DiffDriver, + FilterDriver +}; + +struct DriverInfo { + GraphDriverType Flavour; + LPCWSTR HomeDir; +}; +*/ + +type driverInfo struct { + Flavour int + HomeDirp *uint16 +} + +var ( + utf16EmptyString uint16 + stdDriverInfo = driverInfo{1, &utf16EmptyString} +) + +/* To pass into syscall, we need a struct matching the following: +typedef struct _WC_LAYER_DESCRIPTOR { + + // + // The ID of the layer + // + + GUID LayerId; + + // + // Additional flags + // + + union { + struct { + ULONG Reserved : 31; + ULONG Dirty : 1; // Created from sandbox as a result of snapshot + }; + ULONG Value; + } Flags; + + // + // Path to the layer root directory, null-terminated + // + + PCWSTR Path; + +} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; +*/ +type WC_LAYER_DESCRIPTOR struct { + LayerId guid.GUID + Flags uint32 + Pathp *uint16 +} + +func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { + // Array of descriptors that gets constructed. 
+ var layers []WC_LAYER_DESCRIPTOR + + for i := 0; i < len(parentLayerPaths); i++ { + g, err := LayerID(parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed to convert name to guid") + return nil, err + } + + p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") + return nil, err + } + + layers = append(layers, WC_LAYER_DESCRIPTOR{ + LayerId: g, + Flags: 0, + Pathp: p, + }) + } + + return layers, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go new file mode 100644 index 00000000..b8ea5d26 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -0,0 +1,815 @@ +package wclayer + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/safefile" +) + +var errorIterationCanceled = errors.New("") + +var mutatedUtilityVMFiles = map[string]bool{ + `EFI\Microsoft\Boot\BCD`: true, + `EFI\Microsoft\Boot\BCD.LOG`: true, + `EFI\Microsoft\Boot\BCD.LOG1`: true, + `EFI\Microsoft\Boot\BCD.LOG2`: true, +} + +const ( + filesPath = `Files` + hivesPath = `Hives` + utilityVMPath = `UtilityVM` + utilityVMFilesPath = `UtilityVM\Files` +) + +func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { + return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) +} + +func hasPathPrefix(p, prefix string) bool { + return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' +} + +type fileEntry struct { + path string + fi os.FileInfo + err error +} + +type legacyLayerReader struct { + root string + result chan *fileEntry + proceed chan bool + currentFile *os.File + backupReader *winio.BackupFileReader +} + +// newLegacyLayerReader returns a new LayerReader that can read the Windows +// container layer transport format from disk. +func newLegacyLayerReader(root string) *legacyLayerReader { + r := &legacyLayerReader{ + root: root, + result: make(chan *fileEntry), + proceed: make(chan bool), + } + go r.walk() + return r +} + +func readTombstones(path string) (map[string]([]string), error) { + tf, err := os.Open(filepath.Join(path, "tombstones.txt")) + if err != nil { + return nil, err + } + defer tf.Close() + s := bufio.NewScanner(tf) + if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { + return nil, errors.New("Invalid tombstones file") + } + + ts := make(map[string]([]string)) + for s.Scan() { + t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` + dir := filepath.Dir(t) + ts[dir] = append(ts[dir], t) + } + if err = s.Err(); err != nil { + return nil, err + } + + return ts, nil +} + +func (r *legacyLayerReader) walkUntilCancelled() error { + root, err := longpath.LongAbs(r.root) + if err != nil { + return err + } + + r.root = root + ts, err := readTombstones(r.root) + if err != nil { + return err + } + + err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. + // Handle failure from what may be a golang bug in the conversion of + // UTF16 to UTF8 in files which are left in the recycle bin. 
Os.Lstat + // which is called by filepath.Walk will fail when a filename contains + // unicode characters. Skip the recycle bin regardless which is goodness. + if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { + return filepath.SkipDir + } + + if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { + return nil + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + // List all the tombstones. + if info.IsDir() { + relPath, err := filepath.Rel(r.root, path) + if err != nil { + return err + } + if dts, ok := ts[relPath]; ok { + for _, t := range dts { + r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} + if !<-r.proceed { + return errorIterationCanceled + } + } + } + } + return nil + }) + if err == errorIterationCanceled { + return nil + } + if err == nil { + return io.EOF + } + return err +} + +func (r *legacyLayerReader) walk() { + defer close(r.result) + if !<-r.proceed { + return + } + + err := r.walkUntilCancelled() + if err != nil { + for { + r.result <- &fileEntry{err: err} + if !<-r.proceed { + return + } + } + } +} + +func (r *legacyLayerReader) reset() { + if r.backupReader != nil { + r.backupReader.Close() + r.backupReader = nil + } + if r.currentFile != nil { + r.currentFile.Close() + r.currentFile = nil + } +} + +func findBackupStreamSize(r io.Reader) (int64, error) { + br := winio.NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err != nil { + if err == io.EOF { + err = nil + } + return 0, err + } + if hdr.Id == winio.BackupData { + return hdr.Size, nil + } + } +} + +func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { + r.reset() + r.proceed <- true + fe := <-r.result + if fe == nil { + err = errors.New("LegacyLayerReader closed") + return + } + if fe.err != nil { + err = fe.err + return + } + + path, err = filepath.Rel(r.root, fe.path) + if err != nil { + return + } + + if fe.fi == nil { + // This is a tombstone. Return a nil fileInfo. + return + } + + if fe.fi.IsDir() && hasPathPrefix(path, filesPath) { + fe.path += ".$wcidirs$" + } + + f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) + if err != nil { + return + } + defer func() { + if f != nil { + f.Close() + } + }() + + fileInfo, err = winio.GetFileBasicInfo(f) + if err != nil { + return + } + + if !hasPathPrefix(path, filesPath) { + size = fe.fi.Size() + r.backupReader = winio.NewBackupFileReader(f, false) + if path == hivesPath || path == filesPath { + // The Hives directory has a non-deterministic file time because of the + // nature of the import process. Use the times from System_Delta. + var g *os.File + g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`)) + if err != nil { + return + } + attr := fileInfo.FileAttributes + fileInfo, err = winio.GetFileBasicInfo(g) + g.Close() + if err != nil { + return + } + fileInfo.FileAttributes = attr + } + + // The creation time and access time get reset for files outside of the Files path. + fileInfo.CreationTime = fileInfo.LastWriteTime + fileInfo.LastAccessTime = fileInfo.LastWriteTime + + } else { + // The file attributes are written before the backup stream. + var attr uint32 + err = binary.Read(f, binary.LittleEndian, &attr) + if err != nil { + return + } + fileInfo.FileAttributes = attr + beginning := int64(4) + + // Find the accurate file size. 
+ if !fe.fi.IsDir() { + size, err = findBackupStreamSize(f) + if err != nil { + err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err} + return + } + } + + // Return back to the beginning of the backup stream. + _, err = f.Seek(beginning, 0) + if err != nil { + return + } + } + + r.currentFile = f + f = nil + return +} + +func (r *legacyLayerReader) Read(b []byte) (int, error) { + if r.backupReader == nil { + if r.currentFile == nil { + return 0, io.EOF + } + return r.currentFile.Read(b) + } + return r.backupReader.Read(b) +} + +func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) { + if r.backupReader == nil { + if r.currentFile == nil { + return 0, errors.New("no current file") + } + return r.currentFile.Seek(offset, whence) + } + return 0, errors.New("seek not supported on this stream") +} + +func (r *legacyLayerReader) Close() error { + r.proceed <- false + <-r.result + r.reset() + return nil +} + +type pendingLink struct { + Path, Target string + TargetRoot *os.File +} + +type pendingDir struct { + Path string + Root *os.File +} + +type legacyLayerWriter struct { + root *os.File + destRoot *os.File + parentRoots []*os.File + currentFile *os.File + bufWriter *bufio.Writer + currentFileName string + currentFileRoot *os.File + backupWriter *winio.BackupFileWriter + Tombstones []string + HasUtilityVM bool + uvmDi []dirInfo + addedFiles map[string]bool + PendingLinks []pendingLink + pendingDirs []pendingDir + currentIsDir bool +} + +// newLegacyLayerWriter returns a LayerWriter that can write the contaler layer +// transport format to disk. +func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { + w = &legacyLayerWriter{ + addedFiles: make(map[string]bool), + } + defer func() { + if err != nil { + w.CloseRoots() + w = nil + } + }() + w.root, err = safefile.OpenRoot(root) + if err != nil { + return + } + w.destRoot, err = safefile.OpenRoot(destRoot) + if err != nil { + return + } + for _, r := range parentRoots { + f, err := safefile.OpenRoot(r) + if err != nil { + return w, err + } + w.parentRoots = append(w.parentRoots, f) + } + w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536) + return +} + +func (w *legacyLayerWriter) CloseRoots() { + if w.root != nil { + w.root.Close() + w.root = nil + } + if w.destRoot != nil { + w.destRoot.Close() + w.destRoot = nil + } + for i := range w.parentRoots { + w.parentRoots[i].Close() + } + w.parentRoots = nil +} + +func (w *legacyLayerWriter) initUtilityVM() error { + if !w.HasUtilityVM { + err := safefile.MkdirRelative(utilityVMPath, w.destRoot) + if err != nil { + return err + } + // Server 2016 does not support multiple layers for the utility VM, so + // clone the utility VM from the parent layer into this layer. Use hard + // links to avoid unnecessary copying, since most of the files are + // immutable. 
+ err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) + if err != nil { + return fmt.Errorf("cloning the parent utility VM image failed: %s", err) + } + w.HasUtilityVM = true + } + return nil +} + +func (w *legacyLayerWriter) reset() error { + err := w.bufWriter.Flush() + if err != nil { + return err + } + w.bufWriter.Reset(ioutil.Discard) + if w.currentIsDir { + r := w.currentFile + br := winio.NewBackupStreamReader(r) + // Seek to the beginning of the backup stream, skipping the fileattrs + if _, err := r.Seek(4, io.SeekStart); err != nil { + return err + } + + for { + bhdr, err := br.Next() + if err == io.EOF { + // end of backupstream data + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupReparseData: + // The current file is a `.$wcidirs$` metadata file that + // describes a directory reparse point. Delete the placeholder + // directory to prevent future files being added into the + // destination of the reparse point during the ImportLayer call + if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil { + return err + } + w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) + default: + // ignore all other stream types, as we only care about directory reparse points + } + } + w.currentIsDir = false + } + if w.backupWriter != nil { + w.backupWriter.Close() + w.backupWriter = nil + } + if w.currentFile != nil { + w.currentFile.Close() + w.currentFile = nil + w.currentFileName = "" + w.currentFileRoot = nil + } + return nil +} + +// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata +func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { + src, err := safefile.OpenRelative( + subPath, + srcRoot, + syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + safefile.FILE_OPEN, + safefile.FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, err + } + defer src.Close() + srcr := winio.NewBackupFileReader(src, true) + defer srcr.Close() + + fileInfo, err = winio.GetFileBasicInfo(src) + if err != nil { + return nil, err + } + + extraFlags := uint32(0) + if isDir { + extraFlags |= safefile.FILE_DIRECTORY_FILE + } + dest, err := safefile.OpenRelative( + subPath, + destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + safefile.FILE_CREATE, + extraFlags) + if err != nil { + return nil, err + } + defer dest.Close() + + err = winio.SetFileBasicInfo(dest, fileInfo) + if err != nil { + return nil, err + } + + destw := winio.NewBackupFileWriter(dest, true) + defer func() { + cerr := destw.Close() + if err == nil { + err = cerr + } + }() + + _, err = io.Copy(destw, srcr) + if err != nil { + return nil, err + } + + return fileInfo, nil +} + +// cloneTree clones a directory tree using hard links. It skips hard links for +// the file names in the provided map and just copies those files. 
+func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { + var di []dirInfo + err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot) + if err != nil { + return err + } + err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) + if err != nil { + return err + } + + fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes + // Directories, reparse points, and files that will be mutated during + // utility VM import must be copied. All other files can be hard linked. + isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 + // In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink. + // See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc + // Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly + isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 + + if isDir || isReparsePoint || mutatedFiles[relPath] { + fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) + if err != nil { + return err + } + if isDir && !isReparsePoint { + di = append(di, dirInfo{path: relPath, fileInfo: *fi}) + } + } else { + err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + return reapplyDirectoryTimes(destRoot, di) +} + +func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { + if err := w.reset(); err != nil { + return err + } + + if name == utilityVMPath { + return w.initUtilityVM() + } + + name = filepath.Clean(name) + if hasPathPrefix(name, utilityVMPath) { + if !w.HasUtilityVM { + return errors.New("missing UtilityVM directory") + } + if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { + return errors.New("invalid UtilityVM layer") + } + createDisposition := uint32(safefile.FILE_OPEN) + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + st, err := safefile.LstatRelative(name, w.destRoot) + if err != nil && !os.IsNotExist(err) { + return err + } + if st != nil { + // Delete the existing file/directory if it is not the same type as this directory. + existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil { + return err + } + st = nil + } + } + if st == nil { + if err = safefile.MkdirRelative(name, w.destRoot); err != nil { + return err + } + } + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo}) + } + } else { + // Overwrite any existing hard link. 
+ err := safefile.RemoveRelative(name, w.destRoot) + if err != nil && !os.IsNotExist(err) { + return err + } + createDisposition = safefile.FILE_CREATE + } + + f, err := safefile.OpenRelative( + name, + w.destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + createDisposition, + safefile.FILE_OPEN_REPARSE_POINT, + ) + if err != nil { + return err + } + defer func() { + if f != nil { + f.Close() + safefile.RemoveRelative(name, w.destRoot) + } + }() + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return err + } + + w.backupWriter = winio.NewBackupFileWriter(f, true) + w.bufWriter.Reset(w.backupWriter) + w.currentFile = f + w.currentFileName = name + w.currentFileRoot = w.destRoot + w.addedFiles[name] = true + f = nil + return nil + } + + fname := name + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + err := safefile.MkdirRelative(name, w.root) + if err != nil { + return err + } + fname += ".$wcidirs$" + w.currentIsDir = true + } + + f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, 0) + if err != nil { + return err + } + defer func() { + if f != nil { + f.Close() + safefile.RemoveRelative(fname, w.root) + } + }() + + strippedFi := *fileInfo + strippedFi.FileAttributes = 0 + err = winio.SetFileBasicInfo(f, &strippedFi) + if err != nil { + return err + } + + if hasPathPrefix(name, hivesPath) { + w.backupWriter = winio.NewBackupFileWriter(f, false) + w.bufWriter.Reset(w.backupWriter) + } else { + w.bufWriter.Reset(f) + // The file attributes are written before the stream. + err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes)) + if err != nil { + w.bufWriter.Reset(ioutil.Discard) + return err + } + } + + w.currentFile = f + w.currentFileName = name + w.currentFileRoot = w.root + w.addedFiles[name] = true + f = nil + return nil +} + +func (w *legacyLayerWriter) AddLink(name string, target string) error { + if err := w.reset(); err != nil { + return err + } + + target = filepath.Clean(target) + var roots []*os.File + if hasPathPrefix(target, filesPath) { + // Look for cross-layer hard link targets in the parent layers, since + // nothing is in the destination path yet. + roots = w.parentRoots + } else if hasPathPrefix(target, utilityVMFilesPath) { + // Since the utility VM is fully cloned into the destination path + // already, look for cross-layer hard link targets directly in the + // destination path. + roots = []*os.File{w.destRoot} + } + + if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) { + return errors.New("invalid hard link in layer") + } + + // Find to try the target of the link in a previously added file. If that + // fails, search in parent layers. + var selectedRoot *os.File + if _, ok := w.addedFiles[target]; ok { + selectedRoot = w.destRoot + } else { + for _, r := range roots { + if _, err := safefile.LstatRelative(target, r); err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + selectedRoot = r + break + } + } + if selectedRoot == nil { + return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target) + } + } + + // The link can't be written until after the ImportLayer call. 
+ w.PendingLinks = append(w.PendingLinks, pendingLink{ + Path: name, + Target: target, + TargetRoot: selectedRoot, + }) + w.addedFiles[name] = true + return nil +} + +func (w *legacyLayerWriter) Remove(name string) error { + name = filepath.Clean(name) + if hasPathPrefix(name, filesPath) { + w.Tombstones = append(w.Tombstones, name) + } else if hasPathPrefix(name, utilityVMFilesPath) { + err := w.initUtilityVM() + if err != nil { + return err + } + // Make sure the path exists; os.RemoveAll will not fail if the file is + // already gone, and this needs to be a fatal error for diagnostics + // purposes. + if _, err := safefile.LstatRelative(name, w.destRoot); err != nil { + return err + } + err = safefile.RemoveAllRelative(name, w.destRoot) + if err != nil { + return err + } + } else { + return fmt.Errorf("invalid tombstone %s", name) + } + + return nil +} + +func (w *legacyLayerWriter) Write(b []byte) (int, error) { + if w.backupWriter == nil && w.currentFile == nil { + return 0, errors.New("closed") + } + return w.bufWriter.Write(b) +} + +func (w *legacyLayerWriter) Close() error { + if err := w.reset(); err != nil { + return err + } + if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) { + return err + } + for _, pd := range w.pendingDirs { + err := safefile.MkdirRelative(pd.Path, pd.Root) + if err != nil { + return err + } + } + if w.HasUtilityVM { + err := reapplyDirectoryTimes(w.destRoot, w.uvmDi) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go new file mode 100644 index 00000000..45a63cf6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go @@ -0,0 +1,34 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// NameToGuid converts the given string into a GUID using the algorithm in the +// Host Compute Service, ensuring GUIDs generated with the same string are common +// across all clients. +func NameToGuid(name string) (id guid.GUID, err error) { + title := "hcsshim::NameToGuid" + fields := logrus.Fields{ + "name": name, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = nameToGuid(name, &id) + if err != nil { + err = hcserror.New(err, title+" - failed", "") + return + } + fields["guid"] = id.String() + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go new file mode 100644 index 00000000..2b65b018 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go @@ -0,0 +1,47 @@ +package wclayer + +import ( + "sync" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +var prepareLayerLock sync.Mutex + +// PrepareLayer finds a mounted read-write layer matching path and enables the +// the filesystem filter for use on that layer. This requires the paths to all +// parent layers, and is necessary in order to view or interact with the layer +// as an actual filesystem (reading and writing files, creating directories, etc). +// Disabling the filter must be done via UnprepareLayer. 
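PrepareLayer below completes the sequence sketched earlier for ActivateLayer: it is called on an already-activated scratch layer with the full parent chain, and must be undone with UnprepareLayer before deactivation. A combined sketch; withPreparedLayer is an illustrative name only.

// withPreparedLayer activates a scratch layer, enables the filesystem filter
// with its parent chain, runs fn against the mounted volume path, and then
// unwinds in reverse order. Hypothetical helper, not part of the package.
func withPreparedLayer(scratchPath string, parentLayerPaths []string, fn func(volumePath string) error) error {
	if err := wclayer.ActivateLayer(scratchPath); err != nil {
		return err
	}
	defer func() { _ = wclayer.DeactivateLayer(scratchPath) }()

	if err := wclayer.PrepareLayer(scratchPath, parentLayerPaths); err != nil {
		return err
	}
	defer func() { _ = wclayer.UnprepareLayer(scratchPath) }()

	volumePath, err := wclayer.GetLayerMountPath(scratchPath)
	if err != nil {
		return err
	}
	return fn(volumePath)
}
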
+func PrepareLayer(path string, parentLayerPaths []string) (err error) { + title := "hcsshim::PrepareLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(parentLayerPaths) + if err != nil { + return err + } + + // This lock is a temporary workaround for a Windows bug. Only allowing one + // call to prepareLayer at a time vastly reduces the chance of a timeout. + prepareLayerLock.Lock() + defer prepareLayerLock.Unlock() + err = prepareLayer(&stdDriverInfo, path, layers) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go new file mode 100644 index 00000000..884207c3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go @@ -0,0 +1,23 @@ +package wclayer + +import "os" + +// ProcessBaseLayer post-processes a base layer that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessBaseLayer(path string) error { + err := processBaseImage(path) + if err != nil { + return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err} + } + return nil +} + +// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessUtilityVMImage(path string) error { + err := processUtilityImage(path) + if err != nil { + return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go new file mode 100644 index 00000000..bccd4596 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -0,0 +1,30 @@ +package wclayer + +import ( + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/sirupsen/logrus" +) + +// UnprepareLayer disables the filesystem filter for the read-write layer with +// the given id. +func UnprepareLayer(path string) (err error) { + title := "hcsshim::UnprepareLayer" + fields := logrus.Fields{ + "path": path, + } + logrus.WithFields(fields).Debug(title) + defer func() { + if err != nil { + fields[logrus.ErrorKey] = err + logrus.WithFields(fields).Error(err) + } else { + logrus.WithFields(fields).Debug(title + " - succeeded") + } + }() + + err = unprepareLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go new file mode 100644 index 00000000..78f2aacd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go @@ -0,0 +1,27 @@ +package wclayer + +import "github.com/Microsoft/hcsshim/internal/guid" + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go + +//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? 
+//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? +//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? +//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? +//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? +//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? +//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? +//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? +//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? +//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? +//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? +//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? +//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid? +//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? +//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? +//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? +//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? + +//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? + +type _guid = guid.GUID diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go new file mode 100644 index 00000000..d853ab25 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -0,0 +1,510 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package wclayer + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
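Every //sys declaration above ends with a trailing "?", which tells the generator to emit a proc.Find() check before the call (the ConfirmProc handling in the vendored mksyscall_windows.go later in this patch), so a missing vmcompute export surfaces as an ordinary error rather than a panic on first use. The same probe can be done by hand; a Windows-only sketch using one export name taken from the list above (the generated errnoErr helper continues below):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Resolve the DLL lazily from the system directory, exactly as the
	// generated bindings do, then check whether the export actually exists.
	mod := windows.NewLazySystemDLL("vmcompute.dll")
	proc := mod.NewProc("ActivateLayer")
	if err := proc.Find(); err != nil {
		fmt.Println("vmcompute.ActivateLayer not available:", err)
		return
	}
	fmt.Println("vmcompute.ActivateLayer found at address", proc.Addr())
}
```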
+ return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procActivateLayer = modvmcompute.NewProc("ActivateLayer") + procCopyLayer = modvmcompute.NewProc("CopyLayer") + procCreateLayer = modvmcompute.NewProc("CreateLayer") + procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") + procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") + procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") + procDestroyLayer = modvmcompute.NewProc("DestroyLayer") + procExportLayer = modvmcompute.NewProc("ExportLayer") + procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") + procGetBaseImages = modvmcompute.NewProc("GetBaseImages") + procImportLayer = modvmcompute.NewProc("ImportLayer") + procLayerExists = modvmcompute.NewProc("LayerExists") + procNameToGuid = modvmcompute.NewProc("NameToGuid") + procPrepareLayer = modvmcompute.NewProc("PrepareLayer") + procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") + procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") + procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") + procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") +) + +func activateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _activateLayer(info, _p0) +} + +func _activateLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procActivateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(srcId) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(dstId) + if hr != nil { + return + } + return _copyLayer(info, _p0, _p1, descriptors) +} + +func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procCopyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createLayer(info *driverInfo, id string, parent string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(parent) + if hr != nil { + return + } + return _createLayer(info, _p0, _p1) +} + +func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { + if hr = procCreateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = 
syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _createSandboxLayer(info, _p0, parent, descriptors) +} + +func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procCreateSandboxLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _expandSandboxSize(info, _p0, size) +} + +func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { + if hr = procExpandSandboxSize.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func deactivateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _deactivateLayer(info, _p0) +} + +func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procDeactivateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func destroyLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _destroyLayer(info, _p0) +} + +func _destroyLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procDestroyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _exportLayer(info, _p0, _p1, descriptors) +} + +func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) 
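The conversion used here, syscall.UTF16PtrFromString, is how every string parameter reaches the Win32 layer: it produces a pointer to a NUL-terminated UTF-16 buffer and fails if the input itself contains a NUL, which is why each generated wrapper (including getLayerMountPath, which continues below) checks the error before issuing the syscall. A short Windows-only illustration:

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Normal case: a Go string becomes a pointer to a NUL-terminated UTF-16 buffer.
	p, err := syscall.UTF16PtrFromString("layer-id")
	fmt.Println(p != nil, err) // true <nil>

	// Failure case: an embedded NUL cannot be represented, so the generated
	// wrappers return this error before ever touching vmcompute.dll.
	_, err = syscall.UTF16PtrFromString("bad\x00id")
	fmt.Println(err != nil) // true
}
```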
+ if hr != nil { + return + } + return _getLayerMountPath(info, _p0, length, buffer) +} + +func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { + if hr = procGetLayerMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getBaseImages(buffer **uint16) (hr error) { + if hr = procGetBaseImages.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _importLayer(info, _p0, _p1, descriptors) +} + +func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _layerExists(info, _p0, exists) +} + +func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { + if hr = procLayerExists.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func nameToGuid(name string, guid *_guid) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(name) + if hr != nil { + return + } + return _nameToGuid(_p0, guid) +} + +func _nameToGuid(name *uint16, guid *_guid) (hr error) { + if hr = procNameToGuid.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _prepareLayer(info, _p0, descriptors) +} + +func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procPrepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, 
uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func unprepareLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _unprepareLayer(info, _p0) +} + +func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procUnprepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processBaseImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processBaseImage(_p0) +} + +func _processBaseImage(path *uint16) (hr error) { + if hr = procProcessBaseImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processUtilityImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processUtilityImage(_p0) +} + +func _processUtilityImage(path *uint16) (hr error) { + if hr = procProcessUtilityImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func grantVmAccess(vmid string, filepath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(vmid) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(filepath) + if hr != nil { + return + } + return _grantVmAccess(_p0, _p1) +} + +func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { + if hr = procGrantVmAccess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go b/vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go new file mode 100644 index 00000000..0bbd46a1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go @@ -0,0 +1,26 @@ +package wcow + +import ( + "os" + "path/filepath" + + "github.com/Microsoft/hcsshim/internal/copyfile" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/sirupsen/logrus" +) + +// CreateUVMScratch is a helper to create a scratch for a Windows utility VM +// with permissions to the specified VM ID in a specified directory +func CreateUVMScratch(imagePath, destDirectory, vmID string) error { + sourceScratch := filepath.Join(imagePath, `UtilityVM\SystemTemplate.vhdx`) + targetScratch := filepath.Join(destDirectory, "sandbox.vhdx") + logrus.Debugf("uvm::CreateUVMScratch %s from %s", targetScratch, sourceScratch) + if err := copyfile.CopyFile(sourceScratch, targetScratch, true); err != nil { + return err + } + if err := wclayer.GrantVmAccess(vmID, 
targetScratch); err != nil { + os.Remove(targetScratch) + return err + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go new file mode 100644 index 00000000..df0e63bb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/layer.go @@ -0,0 +1,106 @@ +package hcsshim + +import ( + "crypto/sha1" + "path/filepath" + + "github.com/Microsoft/hcsshim/internal/guid" + "github.com/Microsoft/hcsshim/internal/wclayer" +) + +func layerPath(info *DriverInfo, id string) string { + return filepath.Join(info.HomeDir, id) +} + +func ActivateLayer(info DriverInfo, id string) error { + return wclayer.ActivateLayer(layerPath(&info, id)) +} +func CreateLayer(info DriverInfo, id, parent string) error { + return wclayer.CreateLayer(layerPath(&info, id), parent) +} + +// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. +func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) +} +func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths) +} +func DeactivateLayer(info DriverInfo, id string) error { + return wclayer.DeactivateLayer(layerPath(&info, id)) +} +func DestroyLayer(info DriverInfo, id string) error { + return wclayer.DestroyLayer(layerPath(&info, id)) +} + +// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. +func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) +} +func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(layerPath(&info, layerId), size) +} +func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { + return wclayer.ExportLayer(layerPath(&info, layerId), exportFolderPath, parentLayerPaths) +} +func GetLayerMountPath(info DriverInfo, id string) (string, error) { + return wclayer.GetLayerMountPath(layerPath(&info, id)) +} +func GetSharedBaseImages() (imageData string, err error) { + return wclayer.GetSharedBaseImages() +} +func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { + return wclayer.ImportLayer(layerPath(&info, layerID), importFolderPath, parentLayerPaths) +} +func LayerExists(info DriverInfo, id string) (bool, error) { + return wclayer.LayerExists(layerPath(&info, id)) +} +func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { + return wclayer.PrepareLayer(layerPath(&info, layerId), parentLayerPaths) +} +func ProcessBaseLayer(path string) error { + return wclayer.ProcessBaseLayer(path) +} +func ProcessUtilityVMImage(path string) error { + return wclayer.ProcessUtilityVMImage(path) +} +func UnprepareLayer(info DriverInfo, layerId string) error { + return wclayer.UnprepareLayer(layerPath(&info, layerId)) +} + +type DriverInfo struct { + Flavour int + HomeDir string +} + +type GUID [16]byte + +func NameToGuid(name string) (id GUID, err error) { + g, err := wclayer.NameToGuid(name) + return GUID(g), err +} + +func NewGUID(source string) *GUID { + h := sha1.Sum([]byte(source)) + var g GUID + copy(g[0:], h[0:16]) + return &g +} + +func (g *GUID) ToString() string { + return (guid.GUID)(*g).String() +} + +type 
LayerReader = wclayer.LayerReader + +func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { + return wclayer.NewLayerReader(layerPath(&info, layerID), parentLayerPaths) +} + +type LayerWriter = wclayer.LayerWriter + +func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { + return wclayer.NewLayerWriter(layerPath(&info, layerID), parentLayerPaths) +} + +type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go new file mode 100644 index 00000000..7647734d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go @@ -0,0 +1,943 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +mksyscall_windows generates windows system call bodies + +It parses all files specified on command line containing function +prototypes (like syscall_windows.go) and prints system call bodies +to standard output. + +The prototypes are marked by lines beginning with "//sys" and read +like func declarations if //sys is replaced by func, but: + +* The parameter lists must give a name for each argument. This + includes return parameters. + +* The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + +* If the return parameter is an error number, it must be named err. + +* If go func name needs to be different from it's winapi dll name, + the winapi name could be specified at the end, after "=" sign, like + //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA + +* Each function that returns err needs to supply a condition, that + return value of winapi will be tested against to detect failure. + This would set err to windows "last-error", otherwise it will be nil. + The value can be provided at end of //sys declaration, like + //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA + and is [failretval==0] by default. + +Usage: + mksyscall_windows [flags] [path ...] + +The flags are: + -output + Specify output file name (outputs to console if blank). + -trace + Generate print statement after every syscall. +*/ +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/format" + "go/parser" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "text/template" +) + +var ( + filename = flag.String("output", "", "output file name (standard output if omitted)") + printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") + systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory") + winio = flag.Bool("winio", false, "import go-winio") +) + +func trim(s string) string { + return strings.Trim(s, " \t") +} + +var packageName string + +func packagename() string { + return packageName +} + +func syscalldot() string { + if packageName == "syscall" { + return "" + } + return "syscall." +} + +// Param is function parameter +type Param struct { + Name string + Type string + fn *Fn + tmpVarIdx int +} + +// tmpVar returns temp variable name that will be used to represent p during syscall. 
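Stepping back to layer.go above: the package-level functions re-export the wclayer implementations behind the legacy DriverInfo-based API. A hedged, Windows-only sketch of the usual activate, prepare, query-mount-path, unprepare, deactivate sequence; the paths and layer IDs are made up, and the generator's Param helpers resume below:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Hypothetical layer store: HomeDir holds one subdirectory per layer ID.
	info := hcsshim.DriverInfo{HomeDir: `C:\ProgramData\docker\windowsfilter`}
	const layerID = "example-scratch-layer"
	parents := []string{`C:\ProgramData\docker\windowsfilter\example-base-layer`}

	if err := hcsshim.ActivateLayer(info, layerID); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = hcsshim.DeactivateLayer(info, layerID) }()

	if err := hcsshim.PrepareLayer(info, layerID, parents); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = hcsshim.UnprepareLayer(info, layerID) }()

	// Once prepared, the layer is mounted and visible as a normal filesystem.
	mountPath, err := hcsshim.GetLayerMountPath(info, layerID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("layer mounted at", mountPath)
}
```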
+func (p *Param) tmpVar() string { + if p.tmpVarIdx < 0 { + p.tmpVarIdx = p.fn.curTmpVarIdx + p.fn.curTmpVarIdx++ + } + return fmt.Sprintf("_p%d", p.tmpVarIdx) +} + +// BoolTmpVarCode returns source code for bool temp variable. +func (p *Param) BoolTmpVarCode() string { + const code = `var %s uint32 + if %s { + %s = 1 + } else { + %s = 0 + }` + tmp := p.tmpVar() + return fmt.Sprintf(code, tmp, p.Name, tmp, tmp) +} + +// SliceTmpVarCode returns source code for slice temp variable. +func (p *Param) SliceTmpVarCode() string { + const code = `var %s *%s + if len(%s) > 0 { + %s = &%s[0] + }` + tmp := p.tmpVar() + return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) +} + +// StringTmpVarCode returns source code for string temp variable. +func (p *Param) StringTmpVarCode() string { + errvar := p.fn.Rets.ErrorVarName() + if errvar == "" { + errvar = "_" + } + tmp := p.tmpVar() + const code = `var %s %s + %s, %s = %s(%s)` + s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) + if errvar == "-" { + return s + } + const morecode = ` + if %s != nil { + return + }` + return s + fmt.Sprintf(morecode, errvar) +} + +// TmpVarCode returns source code for temp variable. +func (p *Param) TmpVarCode() string { + switch { + case p.Type == "bool": + return p.BoolTmpVarCode() + case strings.HasPrefix(p.Type, "[]"): + return p.SliceTmpVarCode() + default: + return "" + } +} + +// TmpVarHelperCode returns source code for helper's temp variable. +func (p *Param) TmpVarHelperCode() string { + if p.Type != "string" { + return "" + } + return p.StringTmpVarCode() +} + +// SyscallArgList returns source code fragments representing p parameter +// in syscall. Slices are translated into 2 syscall parameters: pointer to +// the first element and length. +func (p *Param) SyscallArgList() []string { + t := p.HelperType() + var s string + switch { + case t[0] == '*': + s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) + case t == "bool": + s = p.tmpVar() + case strings.HasPrefix(t, "[]"): + return []string{ + fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), + fmt.Sprintf("uintptr(len(%s))", p.Name), + } + default: + s = p.Name + } + return []string{fmt.Sprintf("uintptr(%s)", s)} +} + +// IsError determines if p parameter is used to return error. +func (p *Param) IsError() bool { + return p.Name == "err" && p.Type == "error" +} + +// HelperType returns type of parameter p used in helper function. +func (p *Param) HelperType() string { + if p.Type == "string" { + return p.fn.StrconvType() + } + return p.Type +} + +// join concatenates parameters ps into a string with sep separator. +// Each parameter is converted into string by applying fn to it +// before conversion. +func join(ps []*Param, fn func(*Param) string, sep string) string { + if len(ps) == 0 { + return "" + } + a := make([]string, 0) + for _, p := range ps { + a = append(a, fn(p)) + } + return strings.Join(a, sep) +} + +// Rets describes function return parameters. +type Rets struct { + Name string + Type string + ReturnsError bool + FailCond string +} + +// ErrorVarName returns error variable name for r. +func (r *Rets) ErrorVarName() string { + if r.ReturnsError { + return "err" + } + if r.Type == "error" { + return r.Name + } + return "" +} + +// ToParams converts r into slice of *Param. 
+func (r *Rets) ToParams() []*Param { + ps := make([]*Param, 0) + if len(r.Name) > 0 { + ps = append(ps, &Param{Name: r.Name, Type: r.Type}) + } + if r.ReturnsError { + ps = append(ps, &Param{Name: "err", Type: "error"}) + } + return ps +} + +// List returns source code of syscall return parameters. +func (r *Rets) List() string { + s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") + if len(s) > 0 { + s = "(" + s + ")" + } + return s +} + +// PrintList returns source code of trace printing part correspondent +// to syscall return values. +func (r *Rets) PrintList() string { + return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// SetReturnValuesCode returns source code that accepts syscall return values. +func (r *Rets) SetReturnValuesCode() string { + if r.Name == "" && !r.ReturnsError { + return "" + } + retvar := "r0" + if r.Name == "" { + retvar = "r1" + } + errvar := "_" + if r.ReturnsError { + errvar = "e1" + } + return fmt.Sprintf("%s, _, %s := ", retvar, errvar) +} + +func (r *Rets) useLongHandleErrorCode(retvar string) string { + const code = `if %s { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = %sEINVAL + } + }` + cond := retvar + " == 0" + if r.FailCond != "" { + cond = strings.Replace(r.FailCond, "failretval", retvar, 1) + } + return fmt.Sprintf(code, cond, syscalldot()) +} + +// SetErrorCode returns source code that sets return parameters. +func (r *Rets) SetErrorCode() string { + const code = `if r0 != 0 { + %s = %sErrno(r0) + }` + const hrCode = `if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + %s = %sErrno(r0) + }` + if r.Name == "" && !r.ReturnsError { + return "" + } + if r.Name == "" { + return r.useLongHandleErrorCode("r1") + } + if r.Type == "error" { + if r.Name == "hr" { + return fmt.Sprintf(hrCode, r.Name, syscalldot()) + } else { + return fmt.Sprintf(code, r.Name, syscalldot()) + } + } + s := "" + switch { + case r.Type[0] == '*': + s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) + case r.Type == "bool": + s = fmt.Sprintf("%s = r0 != 0", r.Name) + default: + s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) + } + if !r.ReturnsError { + return s + } + return s + "\n\t" + r.useLongHandleErrorCode(r.Name) +} + +// Fn describes syscall function. +type Fn struct { + Name string + Params []*Param + Rets *Rets + PrintTrace bool + confirmproc bool + dllname string + dllfuncname string + src string + // TODO: get rid of this field and just use parameter index instead + curTmpVarIdx int // insure tmp variables have uniq names +} + +// extractParams parses s to extract function parameters. +func extractParams(s string, f *Fn) ([]*Param, error) { + s = trim(s) + if s == "" { + return nil, nil + } + a := strings.Split(s, ",") + ps := make([]*Param, len(a)) + for i := range ps { + s2 := trim(a[i]) + b := strings.Split(s2, " ") + if len(b) != 2 { + b = strings.Split(s2, "\t") + if len(b) != 2 { + return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") + } + } + ps[i] = &Param{ + Name: trim(b[0]), + Type: trim(b[1]), + fn: f, + tmpVarIdx: -1, + } + } + return ps, nil +} + +// extractSection extracts text out of string s starting after start +// and ending just before end. found return value will indicate success, +// and prefix, body and suffix will contain correspondent parts of string s. 
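The hrCode branch of SetErrorCode above is the template behind every "if int32(r0) < 0" block in the generated zsyscall_windows.go files: a negative HRESULT signals failure, and FACILITY_WIN32 results of the form 0x8007xxxx are masked back to the underlying Win32 error code before being wrapped in syscall.Errno. A self-contained sketch of that decoding, using ERROR_FILE_NOT_FOUND wrapped as an HRESULT as the sample value (the generator's parsing helpers continue below):

```go
package main

import "fmt"

// decodeHRESULT mirrors the failure-handling convention emitted by hrCode:
// a negative HRESULT means the call failed, and FACILITY_WIN32 values
// (0x8007xxxx) are reduced to the original Win32 error code.
func decodeHRESULT(r0 uintptr) (failed bool, code uintptr) {
	if int32(r0) >= 0 {
		return false, 0
	}
	if r0&0x1fff0000 == 0x00070000 {
		r0 &= 0xffff
	}
	return true, r0
}

func main() {
	// 0x80070002 is HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND); masking recovers
	// the familiar Win32 error number 2.
	failed, code := decodeHRESULT(0x80070002)
	fmt.Printf("failed=%v code=%d\n", failed, code) // failed=true code=2

	// S_OK (0) is not negative, so it is treated as success.
	failed, _ = decodeHRESULT(0)
	fmt.Println("failed:", failed) // failed: false
}
```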
+func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { + s = trim(s) + if strings.HasPrefix(s, string(start)) { + // no prefix + body = s[1:] + } else { + a := strings.SplitN(s, string(start), 2) + if len(a) != 2 { + return "", "", s, false + } + prefix = a[0] + body = a[1] + } + a := strings.SplitN(body, string(end), 2) + if len(a) != 2 { + return "", "", "", false + } + return prefix, a[0], a[1], true +} + +// newFn parses string s and return created function Fn. +func newFn(s string) (*Fn, error) { + s = trim(s) + f := &Fn{ + Rets: &Rets{}, + src: s, + PrintTrace: *printTraceFlag, + } + // function name and args + prefix, body, s, found := extractSection(s, '(', ')') + if !found || prefix == "" { + return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") + } + f.Name = prefix + var err error + f.Params, err = extractParams(body, f) + if err != nil { + return nil, err + } + // return values + _, body, s, found = extractSection(s, '(', ')') + if found { + r, err := extractParams(body, f) + if err != nil { + return nil, err + } + switch len(r) { + case 0: + case 1: + if r[0].IsError() { + f.Rets.ReturnsError = true + } else { + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + } + case 2: + if !r[1].IsError() { + return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") + } + f.Rets.ReturnsError = true + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + default: + return nil, errors.New("Too many return values in \"" + f.src + "\"") + } + } + // fail condition + _, body, s, found = extractSection(s, '[', ']') + if found { + f.Rets.FailCond = body + } + // dll and dll function names + s = trim(s) + if s == "" { + return f, nil + } + if !strings.HasPrefix(s, "=") { + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + s = trim(s[1:]) + a := strings.Split(s, ".") + switch len(a) { + case 1: + f.dllfuncname = a[0] + case 2: + f.dllname = a[0] + f.dllfuncname = a[1] + default: + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + if f.dllfuncname[len(f.dllfuncname)-1] == '?' { + f.confirmproc = true + f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1] + } + return f, nil +} + +// DLLName returns DLL name for function f. +func (f *Fn) DLLName() string { + if f.dllname == "" { + return "kernel32" + } + return f.dllname +} + +// DLLName returns DLL function name for function f. +func (f *Fn) DLLFuncName() string { + if f.dllfuncname == "" { + return f.Name + } + return f.dllfuncname +} + +func (f *Fn) ConfirmProc() bool { + return f.confirmproc +} + +// ParamList returns source code for function f parameters. +func (f *Fn) ParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") +} + +// HelperParamList returns source code for helper function f parameters. +func (f *Fn) HelperParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") +} + +// ParamPrintList returns source code of trace printing part correspondent +// to syscall input parameters. +func (f *Fn) ParamPrintList() string { + return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// ParamCount return number of syscall parameters for function f. 
+func (f *Fn) ParamCount() int { + n := 0 + for _, p := range f.Params { + n += len(p.SyscallArgList()) + } + return n +} + +// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... +// to use. It returns parameter count for correspondent SyscallX function. +func (f *Fn) SyscallParamCount() int { + n := f.ParamCount() + switch { + case n <= 3: + return 3 + case n <= 6: + return 6 + case n <= 9: + return 9 + case n <= 12: + return 12 + case n <= 15: + return 15 + default: + panic("too many arguments to system call") + } +} + +// Syscall determines which SyscallX function to use for function f. +func (f *Fn) Syscall() string { + c := f.SyscallParamCount() + if c == 3 { + return syscalldot() + "Syscall" + } + return syscalldot() + "Syscall" + strconv.Itoa(c) +} + +// SyscallParamList returns source code for SyscallX parameters for function f. +func (f *Fn) SyscallParamList() string { + a := make([]string, 0) + for _, p := range f.Params { + a = append(a, p.SyscallArgList()...) + } + for len(a) < f.SyscallParamCount() { + a = append(a, "0") + } + return strings.Join(a, ", ") +} + +// HelperCallParamList returns source code of call into function f helper. +func (f *Fn) HelperCallParamList() string { + a := make([]string, 0, len(f.Params)) + for _, p := range f.Params { + s := p.Name + if p.Type == "string" { + s = p.tmpVar() + } + a = append(a, s) + } + return strings.Join(a, ", ") +} + +// IsUTF16 is true, if f is W (utf16) function. It is false +// for all A (ascii) functions. +func (_ *Fn) IsUTF16() bool { + return true +} + +// StrconvFunc returns name of Go string to OS string function for f. +func (f *Fn) StrconvFunc() string { + if f.IsUTF16() { + return syscalldot() + "UTF16PtrFromString" + } + return syscalldot() + "BytePtrFromString" +} + +// StrconvType returns Go type name used for OS string for f. +func (f *Fn) StrconvType() string { + if f.IsUTF16() { + return "*uint16" + } + return "*byte" +} + +// HasStringParam is true, if f has at least one string parameter. +// Otherwise it is false. +func (f *Fn) HasStringParam() bool { + for _, p := range f.Params { + if p.Type == "string" { + return true + } + } + return false +} + +var uniqDllFuncName = make(map[string]bool) + +// IsNotDuplicate is true if f is not a duplicated function +func (f *Fn) IsNotDuplicate() bool { + funcName := f.DLLFuncName() + if uniqDllFuncName[funcName] == false { + uniqDllFuncName[funcName] = true + return true + } + return false +} + +// HelperName returns name of function f helper. +func (f *Fn) HelperName() string { + if !f.HasStringParam() { + return f.Name + } + return "_" + f.Name +} + +// Source files and functions. +type Source struct { + Funcs []*Fn + Files []string + StdLibImports []string + ExternalImports []string +} + +func (src *Source) Import(pkg string) { + src.StdLibImports = append(src.StdLibImports, pkg) + sort.Strings(src.StdLibImports) +} + +func (src *Source) ExternalImport(pkg string) { + src.ExternalImports = append(src.ExternalImports, pkg) + sort.Strings(src.ExternalImports) +} + +// ParseFiles parses files listed in fs and extracts all syscall +// functions listed in sys comments. It returns source files +// and functions collection *Source if successful. 
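SyscallParamCount and Syscall above pick which syscall.Syscall/Syscall6/Syscall9/... helper a generated thunk uses: the flattened argument count is rounded up to the next supported arity and SyscallParamList pads the unused slots with zeros, which is why calls such as _activateLayer pass trailing 0 arguments. A tiny illustration of that bucketing (the function here is illustrative, not part of the generator; ParseFiles continues below):

```go
package main

import "fmt"

// syscallVariant mirrors the arity buckets used by SyscallParamCount: the
// argument count is rounded up to 3, 6, 9, 12 or 15 and dispatched to the
// matching SyscallN helper, with unused slots padded by zeros.
func syscallVariant(nargs int) (string, error) {
	switch {
	case nargs <= 3:
		return "syscall.Syscall", nil
	case nargs <= 6:
		return "syscall.Syscall6", nil
	case nargs <= 9:
		return "syscall.Syscall9", nil
	case nargs <= 12:
		return "syscall.Syscall12", nil
	case nargs <= 15:
		return "syscall.Syscall15", nil
	default:
		return "", fmt.Errorf("too many arguments to system call: %d", nargs)
	}
}

func main() {
	for _, n := range []int{2, 5, 7} {
		v, _ := syscallVariant(n)
		fmt.Printf("%d args -> %s\n", n, v)
	}
}
```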
+func ParseFiles(fs []string) (*Source, error) { + src := &Source{ + Funcs: make([]*Fn, 0), + Files: make([]string, 0), + StdLibImports: []string{ + "unsafe", + }, + ExternalImports: make([]string, 0), + } + for _, file := range fs { + if err := src.ParseFile(file); err != nil { + return nil, err + } + } + return src, nil +} + +// DLLs return dll names for a source set src. +func (src *Source) DLLs() []string { + uniq := make(map[string]bool) + r := make([]string, 0) + for _, f := range src.Funcs { + name := f.DLLName() + if _, found := uniq[name]; !found { + uniq[name] = true + r = append(r, name) + } + } + return r +} + +// ParseFile adds additional file path to a source set src. +func (src *Source) ParseFile(path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + s := bufio.NewScanner(file) + for s.Scan() { + t := trim(s.Text()) + if len(t) < 7 { + continue + } + if !strings.HasPrefix(t, "//sys") { + continue + } + t = t[5:] + if !(t[0] == ' ' || t[0] == '\t') { + continue + } + f, err := newFn(t[1:]) + if err != nil { + return err + } + src.Funcs = append(src.Funcs, f) + } + if err := s.Err(); err != nil { + return err + } + src.Files = append(src.Files, path) + + // get package name + fset := token.NewFileSet() + _, err = file.Seek(0, 0) + if err != nil { + return err + } + pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) + if err != nil { + return err + } + packageName = pkg.Name.Name + + return nil +} + +// IsStdRepo returns true if src is part of standard library. +func (src *Source) IsStdRepo() (bool, error) { + if len(src.Files) == 0 { + return false, errors.New("no input files provided") + } + abspath, err := filepath.Abs(src.Files[0]) + if err != nil { + return false, err + } + goroot := runtime.GOROOT() + if runtime.GOOS == "windows" { + abspath = strings.ToLower(abspath) + goroot = strings.ToLower(goroot) + } + sep := string(os.PathSeparator) + if !strings.HasSuffix(goroot, sep) { + goroot += sep + } + return strings.HasPrefix(abspath, goroot), nil +} + +// Generate output source file from a source set src. 
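Generate, whose implementation follows, renders srcTemplate through a template.FuncMap so the template body can call helpers such as packagename, syscalldot and newlazydll. A minimal, self-contained text/template sketch of the same mechanism, with a made-up helper and template text:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Helpers registered in the FuncMap become callable from the template body,
	// which is how the generator injects package- and DLL-specific snippets.
	funcMap := template.FuncMap{
		"shout": strings.ToUpper,
	}
	const src = "package {{.Package}}\n\n// {{shout .Package}} bindings would be emitted here.\n"
	t := template.Must(template.New("example").Funcs(funcMap).Parse(src))
	if err := t.Execute(os.Stdout, struct{ Package string }{Package: "wclayer"}); err != nil {
		panic(err)
	}
}
```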
+func (src *Source) Generate(w io.Writer) error { + const ( + pkgStd = iota // any package in std library + pkgXSysWindows // x/sys/windows package + pkgOther + ) + isStdRepo, err := src.IsStdRepo() + if err != nil { + return err + } + var pkgtype int + switch { + case isStdRepo: + pkgtype = pkgStd + case packageName == "windows": + // TODO: this needs better logic than just using package name + pkgtype = pkgXSysWindows + default: + pkgtype = pkgOther + } + if *systemDLL { + switch pkgtype { + case pkgStd: + src.Import("internal/syscall/windows/sysdll") + case pkgXSysWindows: + default: + src.ExternalImport("golang.org/x/sys/windows") + } + } + if *winio { + src.ExternalImport("github.com/Microsoft/go-winio") + } + if packageName != "syscall" { + src.Import("syscall") + } + funcMap := template.FuncMap{ + "packagename": packagename, + "syscalldot": syscalldot, + "newlazydll": func(dll string) string { + arg := "\"" + dll + ".dll\"" + if !*systemDLL { + return syscalldot() + "NewLazyDLL(" + arg + ")" + } + if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") { + arg = strings.Replace(arg, "_", "-", -1) + } + switch pkgtype { + case pkgStd: + return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" + case pkgXSysWindows: + return "NewLazySystemDLL(" + arg + ")" + default: + return "windows.NewLazySystemDLL(" + arg + ")" + } + }, + } + t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) + err = t.Execute(w, src) + if err != nil { + return errors.New("Failed to execute template: " + err.Error()) + } + return nil +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(1) +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + src, err := ParseFiles(flag.Args()) + if err != nil { + log.Fatal(err) + } + + var buf bytes.Buffer + if err := src.Generate(&buf); err != nil { + log.Fatal(err) + } + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + if *filename == "" { + _, err = os.Stdout.Write(data) + } else { + err = ioutil.WriteFile(*filename, data, 0644) + } + if err != nil { + log.Fatal(err) + } +} + +// TODO: use println instead to print in the following template +const srcTemplate = ` + +{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT + +package {{packagename}} + +import ( +{{range .StdLibImports}}"{{.}}" +{{end}} + +{{range .ExternalImports}}"{{.}}" +{{end}} +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e {{syscalldot}}Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( +{{template "dlls" .}} +{{template "funcnames" .}}) +{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} +{{end}} + +{{/* help functions */}} + +{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +{{end}}{{end}} + +{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}} +{{end}}{{end}} + +{{define "helperbody"}} +func {{.Name}}({{.ParamList}}) {{template "results" .}}{ +{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) +} +{{end}} + +{{define "funcbody"}} +func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ +{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}} +{{template "seterror" .}}{{template "printtrace" .}} return +} +{{end}} + +{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} +{{end}}{{end}}{{end}} + +{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} +{{end}}{{end}}{{end}} + +{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} + +{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} + +{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil { + return +} +{{end}}{{end}} + + +{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} +{{end}}{{end}} + +{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") +{{end}}{{end}} + +` diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go new file mode 100644 index 00000000..916950c0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion.go @@ -0,0 +1,51 @@ +package osversion + +import ( + "fmt" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. 
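Get, whose body resumes below, decodes the packed DWORD returned by windows.GetVersion: the major version lives in the low byte, the minor version in the next byte, and the build number in the high word. A small, platform-independent sketch of that unpacking with a hand-packed sample value:

```go
package main

import "fmt"

// splitVersion mirrors the decoding in osversion.Get: major version in the
// low byte, minor version in the next byte, build number in the high word.
func splitVersion(packed uint32) (major, minor uint8, build uint16) {
	major = uint8(packed & 0xFF)
	minor = uint8(packed >> 8 & 0xFF)
	build = uint16(packed >> 16)
	return
}

func main() {
	// 17763<<16 | 0<<8 | 10 encodes Windows 10 / Server 2019 (build 17763,
	// the RS5 constant defined a little further on in this patch).
	packed := uint32(17763)<<16 | uint32(0)<<8 | uint32(10)
	major, minor, build := splitVersion(packed)
	fmt.Printf("%d.%d.%d\n", major, minor, build) // 10.0.17763
}
```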
+ panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 00000000..2d9567f6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,10 @@ +package osversion + +const ( + + // RS2 was a client-only release in case you're asking why it's not in the list. + RS1 = 14393 + RS3 = 16299 + RS4 = 17134 + RS5 = 17763 +) diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE new file mode 100644 index 00000000..5f9d59f1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE @@ -0,0 +1,22 @@ +go-runhcs is a fork of go-runc + +The following is runc's legal notice. + +--- + +runc + +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go new file mode 100644 index 00000000..64491a70 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs.go @@ -0,0 +1,173 @@ +package runhcs + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "sync/atomic" + + irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/containerd/go-runc" +) + +// Format is the type of log formatting options available. +type Format string + +const ( + none Format = "" + // Text is the default text log output. + Text Format = "text" + // JSON is the JSON formatted log output. + JSON Format = "json" +) + +var runhcsPath atomic.Value + +func getCommandPath() string { + const command = "runhcs.exe" + + pathi := runhcsPath.Load() + if pathi == nil { + path, err := exec.LookPath(command) + if err != nil { + // LookPath only finds current directory matches based on the + // caller's current directory, but the caller is not likely in the + // same directory as the containerd executables. Instead match the + // calling binary's path (a containerd shim usually) and see if they + // are side by side. If so, execute the runhcs.exe found there.
+ if self, serr := os.Executable(); serr == nil { + testPath := filepath.Join(filepath.Dir(self), command) + if _, serr := os.Stat(testPath); serr == nil { + path = testPath + } + } + if path == "" { + // Failed to look up the command; just use it directly and let the + // Windows loader find it. + path = command + } + runhcsPath.Store(path) + return path + } + apath, err := filepath.Abs(path) + if err != nil { + // We couldn't make `path` an absolute path. Just use `path` directly and + // let the Windows loader find it. + apath = path + } + runhcsPath.Store(apath) + return apath + } + return pathi.(string) +} + +var bytesBufferPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(nil) + }, +} + +func getBuf() *bytes.Buffer { + return bytesBufferPool.Get().(*bytes.Buffer) +} + +func putBuf(b *bytes.Buffer) { + b.Reset() + bytesBufferPool.Put(b) +} + +// Runhcs is the client to the runhcs CLI. +type Runhcs struct { + // Debug enables debug output for logging. + Debug bool + // Log sets the log file path or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-log) where internal debug information is written. + Log string + // LogFormat sets the format used by logs. + LogFormat Format + // Owner sets the compute system owner property. + Owner string + // Root is the registry key root for storage of runhcs container state. + Root string +} + +func (r *Runhcs) args() []string { + var out []string + if r.Debug { + out = append(out, "--debug") + } + if r.Log != "" { + if strings.HasPrefix(r.Log, irunhcs.SafePipePrefix) { + out = append(out, "--log", r.Log) + } else { + abs, err := filepath.Abs(r.Log) + if err == nil { + out = append(out, "--log", abs) + } + } + } + if r.LogFormat != none { + out = append(out, "--log-format", string(r.LogFormat)) + } + if r.Owner != "" { + out = append(out, "--owner", r.Owner) + } + if r.Root != "" { + out = append(out, "--root", r.Root) + } + return out +} + +func (r *Runhcs) command(context context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(context, getCommandPath(), append(r.args(), args...)...) + cmd.Env = os.Environ() + return cmd +} + +// runOrError will run the provided command.
If an error is +// encountered and neither Stdout nor Stderr was set, the error and the +// stderr of the command will be returned in the format "error: stderr". +// +func (r *Runhcs) runOrError(cmd *exec.Cmd) error { + if cmd.Stdout != nil || cmd.Stderr != nil { + ec, err := runc.Monitor.Start(cmd) + if err != nil { + return err + } + status, err := runc.Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + } + return err + } + data, err := cmdOutput(cmd, true) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil +} + +func cmdOutput(cmd *exec.Cmd, combined bool) ([]byte, error) { + b := getBuf() + defer putBuf(b) + + cmd.Stdout = b + if combined { + cmd.Stderr = b + } + ec, err := runc.Monitor.Start(cmd) + if err != nil { + return nil, err + } + + status, err := runc.Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + } + + return b.Bytes(), err +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go new file mode 100644 index 00000000..3b53b399 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create-scratch.go @@ -0,0 +1,10 @@ +package runhcs + +import ( + "context" +) + +// CreateScratch creates a scratch vhdx at 'destpath' that is ext4 formatted. +func (r *Runhcs) CreateScratch(context context.Context, destpath string) error { + return r.runOrError(r.command(context, "create-scratch", "--destpath", destpath)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go new file mode 100644 index 00000000..b10001e4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_create.go @@ -0,0 +1,101 @@ +package runhcs + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" + runc "github.com/containerd/go-runc" +) + +// CreateOpts is a set of options that can be used with the Create command. +type CreateOpts struct { + runc.IO + // PidFile is the path to the file to write the process id to. + PidFile string + // ShimLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--shim-log) for the launched shim process. + ShimLog string + // VMLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs--vm-log) for the launched VM shim process. + VMLog string + // VMConsole is the path to the pipe for the VM's console (e.g.
\\.\pipe\debugpipe) + VMConsole string +} + +func (opt *CreateOpts) args() ([]string, error) { + var out []string + if opt.PidFile != "" { + abs, err := filepath.Abs(opt.PidFile) + if err != nil { + return nil, err + } + out = append(out, "--pid-file", abs) + } + if opt.ShimLog != "" { + if strings.HasPrefix(opt.ShimLog, irunhcs.SafePipePrefix) { + out = append(out, "--shim-log", opt.ShimLog) + } else { + abs, err := filepath.Abs(opt.ShimLog) + if err != nil { + return nil, err + } + out = append(out, "--shim-log", abs) + } + } + if opt.VMLog != "" { + if strings.HasPrefix(opt.VMLog, irunhcs.SafePipePrefix) { + out = append(out, "--vm-log", opt.VMLog) + } else { + abs, err := filepath.Abs(opt.VMLog) + if err != nil { + return nil, err + } + out = append(out, "--vm-log", abs) + } + } + if opt.VMConsole != "" { + out = append(out, "--vm-console", opt.VMConsole) + } + return out, nil +} + +// Create creates a new container with the given id from the bundle at 'bundle'. +// The container's pid is written to CreateOpts.PidFile if one was set. +func (r *Runhcs) Create(context context.Context, id, bundle string, opts *CreateOpts) error { + args := []string{"create", "--bundle", bundle} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + cmd := r.command(context, append(args, id)...) + if opts != nil && opts.IO != nil { + opts.Set(cmd) + } + if cmd.Stdout == nil && cmd.Stderr == nil { + data, err := cmdOutput(cmd, true) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil + } + ec, err := runc.Monitor.Start(cmd) + if err != nil { + return err + } + if opts != nil && opts.IO != nil { + if c, ok := opts.IO.(runc.StartCloser); ok { + if err := c.CloseAfterStart(); err != nil { + return err + } + } + } + status, err := runc.Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + } + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go new file mode 100644 index 00000000..08b82bbd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_delete.go @@ -0,0 +1,33 @@ +package runhcs + +import ( + "context" +) + +// DeleteOpts is a set of options that can be used with the Delete command. +type DeleteOpts struct { + // Force forcibly deletes the container if it is still running (uses SIGKILL). + Force bool +} + +func (opt *DeleteOpts) args() ([]string, error) { + var out []string + if opt.Force { + out = append(out, "--force") + } + return out, nil +} + +// Delete any resources held by the container; often used with detached +// containers. +func (r *Runhcs) Delete(context context.Context, id string, opts *DeleteOpts) error { + args := []string{"delete"} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + return r.runOrError(r.command(context, append(args, id)...)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go new file mode 100644 index 00000000..090a0a31 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_exec.go @@ -0,0 +1,88 @@ +package runhcs + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/containerd/go-runc" +) + +// ExecOpts is a set of options that can be used with the Exec command.
+type ExecOpts struct { + runc.IO + // Detach from the container's process. + Detach bool + // PidFile is the path to the file to write the process id to. + PidFile string + // ShimLog is the path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs---log) for the launched shim process. + ShimLog string +} + +func (opt *ExecOpts) args() ([]string, error) { + var out []string + if opt.Detach { + out = append(out, "--detach") + } + if opt.PidFile != "" { + abs, err := filepath.Abs(opt.PidFile) + if err != nil { + return nil, err + } + out = append(out, "--pid-file", abs) + } + if opt.ShimLog != "" { + if strings.HasPrefix(opt.ShimLog, irunhcs.SafePipePrefix) { + out = append(out, "--shim-log", opt.ShimLog) + } else { + abs, err := filepath.Abs(opt.ShimLog) + if err != nil { + return nil, err + } + out = append(out, "--shim-log", abs) + } + } + return out, nil +} + +// Exec executes an additional process inside the container based on the +// oci.Process spec found at processFile. +func (r *Runhcs) Exec(context context.Context, id, processFile string, opts *ExecOpts) error { + args := []string{"exec", "--process", processFile} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + cmd := r.command(context, append(args, id)...) + if opts != nil && opts.IO != nil { + opts.Set(cmd) + } + if cmd.Stdout == nil && cmd.Stderr == nil { + data, err := cmdOutput(cmd, true) + if err != nil { + return fmt.Errorf("%s: %s", err, data) + } + return nil + } + ec, err := runc.Monitor.Start(cmd) + if err != nil { + return err + } + if opts != nil && opts.IO != nil { + if c, ok := opts.IO.(runc.StartCloser); ok { + if err := c.CloseAfterStart(); err != nil { + return err + } + } + } + status, err := runc.Monitor.Wait(cmd, ec) + if err == nil && status != 0 { + err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + } + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go new file mode 100644 index 00000000..021e5b16 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_kill.go @@ -0,0 +1,11 @@ +package runhcs + +import ( + "context" +) + +// Kill sends the specified signal (default: SIGTERM) to the container's init +// process. +func (r *Runhcs) Kill(context context.Context, id, signal string) error { + return r.runOrError(r.command(context, "kill", id, signal)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go new file mode 100644 index 00000000..3b920801 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_list.go @@ -0,0 +1,28 @@ +package runhcs + +import ( + "context" + "encoding/json" + + irunhcs "github.com/Microsoft/hcsshim/internal/runhcs" +) + +// ContainerState is the representation of the container's state at the moment of +// query. +type ContainerState = irunhcs.ContainerState + +// List containers started by runhcs. +// +// Note: This is specific to the Runhcs.Root namespace provided in the global +// settings.
+func (r *Runhcs) List(context context.Context) ([]*ContainerState, error) { + data, err := cmdOutput(r.command(context, "list", "--format=json"), false) + if err != nil { + return nil, err + } + var out []*ContainerState + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go new file mode 100644 index 00000000..56392fa4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_pause.go @@ -0,0 +1,10 @@ +package runhcs + +import ( + "context" +) + +// Pause suspends all processes inside the container. +func (r *Runhcs) Pause(context context.Context, id string) error { + return r.runOrError(r.command(context, "pause", id)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go new file mode 100644 index 00000000..4dc9f144 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_ps.go @@ -0,0 +1,20 @@ +package runhcs + +import ( + "context" + "encoding/json" + "fmt" +) + +// Ps displays the processes running inside a container. +func (r *Runhcs) Ps(context context.Context, id string) ([]int, error) { + data, err := cmdOutput(r.command(context, "ps", "--format=json", id), true) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + var out []int + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go new file mode 100644 index 00000000..b9f90491 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resize-tty.go @@ -0,0 +1,33 @@ +package runhcs + +import ( + "context" + "strconv" +) + +// ResizeTTYOpts is a set of options that can be used with the ResizeTTY command. +type ResizeTTYOpts struct { + // Pid is the process pid (defaults to init pid). + Pid *int +} + +func (opt *ResizeTTYOpts) args() ([]string, error) { + var out []string + if opt.Pid != nil { + out = append(out, "--pid", strconv.Itoa(*opt.Pid)) + } + return out, nil +} + +// ResizeTTY updates the terminal size for a container process. +func (r *Runhcs) ResizeTTY(context context.Context, id string, width, height uint16, opts *ResizeTTYOpts) error { + args := []string{"resize-tty"} + if opts != nil { + oargs, err := opts.args() + if err != nil { + return err + } + args = append(args, oargs...) + } + return r.runOrError(r.command(context, append(args, id, strconv.FormatUint(uint64(width), 10), strconv.FormatUint(uint64(height), 10))...)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go new file mode 100644 index 00000000..1fdeb87d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_resume.go @@ -0,0 +1,10 @@ +package runhcs + +import ( + "context" +) + +// Resume resumes all processes that have been previously paused.
+func (r *Runhcs) Resume(context context.Context, id string) error { + return r.runOrError(r.command(context, "resume", id)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go new file mode 100644 index 00000000..ad3df746 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_start.go @@ -0,0 +1,10 @@ +package runhcs + +import ( + "context" +) + +// Start will start an already created container. +func (r *Runhcs) Start(context context.Context, id string) error { + return r.runOrError(r.command(context, "start", id)) +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go new file mode 100644 index 00000000..b22bb079 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_state.go @@ -0,0 +1,20 @@ +package runhcs + +import ( + "context" + "encoding/json" + "fmt" +) + +// State outputs the state of a container. +func (r *Runhcs) State(context context.Context, id string) (*ContainerState, error) { + data, err := cmdOutput(r.command(context, "state", id), true) + if err != nil { + return nil, fmt.Errorf("%s: %s", err, data) + } + var out ContainerState + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return &out, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_test.go b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_test.go new file mode 100644 index 00000000..bdbae1bf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/runhcs_test.go @@ -0,0 +1,68 @@ +package runhcs + +import ( + "os" + "path/filepath" + "sync/atomic" + "testing" +) + +func resetRunhcsPath() { + runhcsPath = atomic.Value{} +} + +func TestGetCommandPath_NoLookPath(t *testing.T) { + resetRunhcsPath() + + path := getCommandPath() + if path != "runhcs.exe" { + t.Fatalf("expected path 'runhcs.exe' got '%s'", path) + } + pathi := runhcsPath.Load() + if pathi == nil { + t.Fatal("cache state should be set after first query") + } + if path != pathi.(string) { + t.Fatalf("expected: '%s' in cache got '%s'", path, pathi.(string)) + } +} + +func TestGetCommandPath_WithLookPath(t *testing.T) { + resetRunhcsPath() + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get cwd with err: %v", err) + } + fakePath := filepath.Join(wd, "runhcs.exe") + f, err := os.Create(fakePath) + if err != nil { + t.Fatalf("failed to create fake runhcs.exe in path with err: %v", err) + } + f.Close() + defer os.Remove(fakePath) + + path := getCommandPath() + if path != fakePath { + t.Fatalf("expected fake path '%s' got '%s'", fakePath, path) + } + pathi := runhcsPath.Load() + if pathi == nil { + t.Fatal("cache state should be set after first query") + } + if path != pathi.(string) { + t.Fatalf("expected: '%s' in cache got '%s'", fakePath, pathi.(string)) + } +} + +func TestGetCommandPath_WithCache(t *testing.T) { + resetRunhcsPath() + + value := "this is a test" + runhcsPath.Store(value) + + path := getCommandPath() + if path != value { + t.Fatalf("expected fake cached path: '%s' got '%s'", value, path) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go new file mode 100644 index 00000000..ca8acbb7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/process.go @@ -0,0 +1,72 @@ +package hcsshim + +import ( + "io" + "time" + + 
"github.com/Microsoft/hcsshim/internal/hcs" +) + +// ContainerError is an error encountered in HCS +type process struct { + p *hcs.Process +} + +// Pid returns the process ID of the process within the container. +func (process *process) Pid() int { + return process.p.Pid() +} + +// Kill signals the process to terminate but does not wait for it to finish terminating. +func (process *process) Kill() error { + return convertProcessError(process.p.Kill(), process) +} + +// Wait waits for the process to exit. +func (process *process) Wait() error { + return convertProcessError(process.p.Wait(), process) +} + +// WaitTimeout waits for the process to exit or the duration to elapse. It returns +// false if timeout occurs. +func (process *process) WaitTimeout(timeout time.Duration) error { + return convertProcessError(process.p.WaitTimeout(timeout), process) +} + +// ExitCode returns the exit code of the process. The process must have +// already terminated. +func (process *process) ExitCode() (int, error) { + code, err := process.p.ExitCode() + if err != nil { + err = convertProcessError(err, process) + } + return code, err +} + +// ResizeConsole resizes the console of the process. +func (process *process) ResizeConsole(width, height uint16) error { + return convertProcessError(process.p.ResizeConsole(width, height), process) +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes; it should be possible to +// call this multiple times to get multiple interfaces. +func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { + stdin, stdout, stderr, err := process.p.Stdio() + if err != nil { + err = convertProcessError(err, process) + } + return stdin, stdout, stderr, err +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. +func (process *process) CloseStdin() error { + return convertProcessError(process.p.CloseStdin(), process) +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. 
+func (process *process) Close() error { + return convertProcessError(process.p.Close(), process) +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultlinuxspec.json b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultlinuxspec.json new file mode 100644 index 00000000..a940de8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultlinuxspec.json @@ -0,0 +1,257 @@ +{ + "ociVersion":"1.0.1", + "process":{ + "user":{ + "uid":0, + "gid":0 + }, + "args":null, + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "TERM=xterm" + ], + "cwd":"/", + "capabilities":{ + "bounding":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "effective":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "inheritable":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "permitted":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ] + } + }, + "mounts":[ + { + "destination":"/proc", + "type":"proc", + "source":"proc", + "options":[ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination":"/dev", + "type":"tmpfs", + "source":"tmpfs", + "options":[ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + }, + { + "destination":"/dev/pts", + "type":"devpts", + "source":"devpts", + "options":[ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ] + }, + { + "destination":"/sys", + "type":"sysfs", + "source":"sysfs", + "options":[ + "nosuid", + "noexec", + "nodev", + "ro" + ] + }, + { + "destination":"/sys/fs/cgroup", + "type":"cgroup", + "source":"cgroup", + "options":[ + "ro", + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination":"/dev/mqueue", + "type":"mqueue", + "source":"mqueue", + "options":[ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination":"/dev/shm", + "type":"tmpfs", + "source":"shm", + "options":[ + "nosuid", + "noexec", + "nodev", + "mode=1777" + ] + } + ], + "linux":{ + "resources":{ + "devices":[ + { + "allow":false, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":5, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":3, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":9, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":8, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":5, + "minor":0, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":5, + "minor":1, + "access":"rwm" + }, + { + "allow":false, + "type":"c", + "major":10, + "minor":229, + "access":"rwm" + } + ] + }, + "namespaces":[ + { + "type":"mount" + }, + { + "type":"network" + }, + { + "type":"uts" + }, + { + "type":"pid" + }, 
+ { + "type":"ipc" + } + ], + "maskedPaths":[ + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "readonlyPaths":[ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + }, + "windows":{ + "layerFolders":null + } +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultwindowsspec.json b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultwindowsspec.json new file mode 100644 index 00000000..7426c982 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/defaultwindowsspec.json @@ -0,0 +1,11 @@ +{ + "ociVersion": "1.0.1", + "process": { + "args": null, + "env": [], + "cwd": "c:\\" + }, + "windows": { + "layerFolders": null + } +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/config.justin.lcow.working.json b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/config.justin.lcow.working.json new file mode 100644 index 00000000..b29f17be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/config.justin.lcow.working.json @@ -0,0 +1,254 @@ +{ + "ociVersion":"1.0.1", + "process":{ + "user":{ + "uid":0, + "gid":0 + }, + "args":["/bin/sh", "-c", "echo hello world"], + "cwd":"/", + "capabilities":{ + "bounding":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "effective":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "inheritable":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "permitted":[ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ] + } + }, + "mounts":[ + { + "destination":"/proc", + "type":"proc", + "source":"proc", + "options":[ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination":"/dev", + "type":"tmpfs", + "source":"tmpfs", + "options":[ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + }, + { + "destination":"/dev/pts", + "type":"devpts", + "source":"devpts", + "options":[ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ] + }, + { + "destination":"/sys", + "type":"sysfs", + "source":"sysfs", + "options":[ + "nosuid", + "noexec", + "nodev", + "ro" + ] + }, + { + "destination":"/sys/fs/cgroup", + "type":"cgroup", + "source":"cgroup", + "options":[ + "ro", + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination":"/dev/mqueue", + "type":"mqueue", + "source":"mqueue", + "options":[ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination":"/dev/shm", + "type":"tmpfs", + "source":"shm", + "options":[ + "nosuid", + "noexec", + "nodev", + 
"mode=1777" + ] + } + ], + "linux":{ + "resources":{ + "devices":[ + { + "allow":false, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":5, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":3, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":9, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":1, + "minor":8, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":5, + "minor":0, + "access":"rwm" + }, + { + "allow":true, + "type":"c", + "major":5, + "minor":1, + "access":"rwm" + }, + { + "allow":false, + "type":"c", + "major":10, + "minor":229, + "access":"rwm" + } + ] + }, + "namespaces":[ + { + "type":"mount" + }, + { + "type":"network" + }, + { + "type":"uts" + }, + { + "type":"pid" + }, + { + "type":"ipc" + } + ], + "maskedPaths":[ + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "readonlyPaths":[ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + }, + "windows":{ + "HyperV": {}, + "layerFolders":["C:\\docker_images\\lcow\\bcc7d1fc0b8294c100274bb07984400ac1af6d375cb583672d5071c855c73cc2", "C:\\docker_images\\lcow\\c718cb96ac6354b411660c24a4a54e8c8cb3052422b43589ea6af5a745ded451"] + } +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/privileged.json b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/privileged.json new file mode 100644 index 00000000..8dfc338b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/privileged.json @@ -0,0 +1,2006 @@ +{ + "ociVersion": "1.0.1", + "process": { + "terminal": true, + "user": { + "uid": 0, + "gid": 0 + }, + "args": [ + "sh" + ], + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=b60f381d92f9", + "TERM=xterm" + ], + "cwd": "/", + "capabilities": { + "bounding": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_MAC_OVERRIDE", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "effective": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_MAC_OVERRIDE", + "CAP_MAC_ADMIN", + 
"CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_MAC_OVERRIDE", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_MAC_OVERRIDE", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ] + }, + "apparmorProfile": "unconfined", + "oomScoreAdj": 0 + }, + "root": { + "path": "/var/lib/docker/overlay2/1e71397e037a7edf6a1a85ff10fe42d58743e5752bb9a56ae4d0af615f22f7d0/merged" + }, + "hostname": "b60f381d92f9", + "mounts": [ + { + "destination": "/proc", + "type": "proc", + "source": "proc", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/dev", + "type": "tmpfs", + "source": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + }, + { + "destination": "/dev/pts", + "type": "devpts", + "source": "devpts", + "options": [ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ] + }, + { + "destination": "/sys", + "type": "sysfs", + "source": "sysfs", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/sys/fs/cgroup", + "type": "cgroup", + "source": "cgroup", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/dev/mqueue", + "type": "mqueue", + "source": "mqueue", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/etc/resolv.conf", + "type": "bind", + "source": "/var/lib/docker/containers/b60f381d92f9dd7533c8e3b173f76166bc7dee4ca9adab93789e0458a996bbc5/resolv.conf", + "options": [ + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hostname", + "type": "bind", + "source": "/var/lib/docker/containers/b60f381d92f9dd7533c8e3b173f76166bc7dee4ca9adab93789e0458a996bbc5/hostname", + "options": [ + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hosts", + "type": "bind", + "source": "/var/lib/docker/containers/b60f381d92f9dd7533c8e3b173f76166bc7dee4ca9adab93789e0458a996bbc5/hosts", + "options": [ + "rbind", + "rprivate" + ] + }, + { + "destination": "/dev/shm", + "type": "bind", + "source": "/var/lib/docker/containers/b60f381d92f9dd7533c8e3b173f76166bc7dee4ca9adab93789e0458a996bbc5/mounts/shm", + "options": [ + "rbind", + "rprivate" 
+ ] + } + ], + "hooks": { + "prestart": [ + { + "path": "/usr/bin/dockerd", + "args": [ + "libnetwork-setkey", + "b60f381d92f9dd7533c8e3b173f76166bc7dee4ca9adab93789e0458a996bbc5", + "6f39a30d5033cbfcffe8eab1fb7d9f1fdbe21374b9e989253b0201046224bbe1" + ] + } + ] + }, + "linux": { + "resources": { + "devices": [ + { + "allow": true, + "access": "rwm" + } + ], + "memory": {}, + "cpu": { + "shares": 0 + }, + "pids": { + "limit": 0 + }, + "blockIO": { + "weight": 0 + } + }, + "cgroupsPath": "/docker/b60f381d92f9dd7533c8e3b173f76166bc7dee4ca9adab93789e0458a996bbc5", + "namespaces": [ + { + "type": "mount" + }, + { + "type": "network" + }, + { + "type": "uts" + }, + { + "type": "pid" + }, + { + "type": "ipc" + } + ], + "devices": [ + { + "path": "/dev/autofs", + "type": "c", + "major": 10, + "minor": 235, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/bsg/0:0:0:0", + "type": "c", + "major": 249, + "minor": 0, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/bsg/0:0:0:1", + "type": "c", + "major": 249, + "minor": 1, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/btrfs-control", + "type": "c", + "major": 10, + "minor": 234, + "fileMode": 8624, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/cpu/0/cpuid", + "type": "c", + "major": 203, + "minor": 0, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu/1/cpuid", + "type": "c", + "major": 203, + "minor": 1, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu/2/cpuid", + "type": "c", + "major": 203, + "minor": 2, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu/3/cpuid", + "type": "c", + "major": 203, + "minor": 3, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu/4/cpuid", + "type": "c", + "major": 203, + "minor": 4, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu/5/cpuid", + "type": "c", + "major": 203, + "minor": 5, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu/6/cpuid", + "type": "c", + "major": 203, + "minor": 6, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu/7/cpuid", + "type": "c", + "major": 203, + "minor": 7, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cpu_dma_latency", + "type": "c", + "major": 10, + "minor": 59, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/cuse", + "type": "c", + "major": 10, + "minor": 203, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/ecryptfs", + "type": "c", + "major": 10, + "minor": 61, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/fb0", + "type": "c", + "major": 29, + "minor": 0, + "fileMode": 8624, + "uid": 0, + "gid": 44 + }, + { + "path": "/dev/full", + "type": "c", + "major": 1, + "minor": 7, + "fileMode": 8630, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/fuse", + "type": "c", + "major": 10, + "minor": 229, + "fileMode": 8630, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/hpet", + "type": "c", + "major": 10, + "minor": 228, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/hwrng", + "type": "c", + "major": 10, + "minor": 183, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/input/event0", + "type": "c", + "major": 13, + "minor": 64, + "fileMode": 8624, + "uid": 0, + "gid": 106 + }, + { + "path": "/dev/input/event1", + "type": "c", + "major": 13, + "minor": 65, + "fileMode": 8624, + "uid": 0, + "gid": 106 + }, + { + "path": "/dev/input/js0", + "type": "c", + "major": 
13, + "minor": 0, + "fileMode": 8628, + "uid": 0, + "gid": 106 + }, + { + "path": "/dev/input/mice", + "type": "c", + "major": 13, + "minor": 63, + "fileMode": 8624, + "uid": 0, + "gid": 106 + }, + { + "path": "/dev/input/mouse0", + "type": "c", + "major": 13, + "minor": 32, + "fileMode": 8624, + "uid": 0, + "gid": 106 + }, + { + "path": "/dev/kmsg", + "type": "c", + "major": 1, + "minor": 11, + "fileMode": 8612, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/lightnvm/control", + "type": "c", + "major": 10, + "minor": 60, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/loop-control", + "type": "c", + "major": 10, + "minor": 237, + "fileMode": 8624, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop0", + "type": "b", + "major": 7, + "minor": 0, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop1", + "type": "b", + "major": 7, + "minor": 1, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop2", + "type": "b", + "major": 7, + "minor": 2, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop3", + "type": "b", + "major": 7, + "minor": 3, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop4", + "type": "b", + "major": 7, + "minor": 4, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop5", + "type": "b", + "major": 7, + "minor": 5, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop6", + "type": "b", + "major": 7, + "minor": 6, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/loop7", + "type": "b", + "major": 7, + "minor": 7, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/mapper/control", + "type": "c", + "major": 10, + "minor": 236, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/mcelog", + "type": "c", + "major": 10, + "minor": 227, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/mem", + "type": "c", + "major": 1, + "minor": 1, + "fileMode": 8608, + "uid": 0, + "gid": 15 + }, + { + "path": "/dev/memory_bandwidth", + "type": "c", + "major": 10, + "minor": 56, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/net/tun", + "type": "c", + "major": 10, + "minor": 200, + "fileMode": 8630, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/network_latency", + "type": "c", + "major": 10, + "minor": 58, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/network_throughput", + "type": "c", + "major": 10, + "minor": 57, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/null", + "type": "c", + "major": 1, + "minor": 3, + "fileMode": 8630, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/port", + "type": "c", + "major": 1, + "minor": 4, + "fileMode": 8608, + "uid": 0, + "gid": 15 + }, + { + "path": "/dev/ppp", + "type": "c", + "major": 108, + "minor": 0, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/psaux", + "type": "c", + "major": 10, + "minor": 1, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/ptmx", + "type": "c", + "major": 5, + "minor": 2, + "fileMode": 8630, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/ptp0", + "type": "c", + "major": 246, + "minor": 0, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/random", + "type": "c", + "major": 1, + "minor": 8, + "fileMode": 8630, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/rfkill", + "type": "c", + "major": 10, + "minor": 62, + "fileMode": 8628, + "uid": 0, + "gid": 109 + }, + { + "path": "/dev/rtc0", + "type": 
"c", + "major": 251, + "minor": 0, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/sda", + "type": "b", + "major": 8, + "minor": 0, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/sda1", + "type": "b", + "major": 8, + "minor": 1, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/sda2", + "type": "b", + "major": 8, + "minor": 2, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/sda3", + "type": "b", + "major": 8, + "minor": 3, + "fileMode": 25008, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/sg0", + "type": "c", + "major": 21, + "minor": 0, + "fileMode": 8624, + "uid": 0, + "gid": 6 + }, + { + "path": "/dev/sg1", + "type": "c", + "major": 21, + "minor": 1, + "fileMode": 8624, + "uid": 0, + "gid": 24 + }, + { + "path": "/dev/snapshot", + "type": "c", + "major": 10, + "minor": 231, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/snd/seq", + "type": "c", + "major": 116, + "minor": 1, + "fileMode": 8624, + "uid": 0, + "gid": 29 + }, + { + "path": "/dev/snd/timer", + "type": "c", + "major": 116, + "minor": 33, + "fileMode": 8624, + "uid": 0, + "gid": 29 + }, + { + "path": "/dev/sr0", + "type": "b", + "major": 11, + "minor": 0, + "fileMode": 25008, + "uid": 0, + "gid": 24 + }, + { + "path": "/dev/tty", + "type": "c", + "major": 5, + "minor": 0, + "fileMode": 8630, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty0", + "type": "c", + "major": 4, + "minor": 0, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty1", + "type": "c", + "major": 4, + "minor": 1, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty10", + "type": "c", + "major": 4, + "minor": 10, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty11", + "type": "c", + "major": 4, + "minor": 11, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty12", + "type": "c", + "major": 4, + "minor": 12, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty13", + "type": "c", + "major": 4, + "minor": 13, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty14", + "type": "c", + "major": 4, + "minor": 14, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty15", + "type": "c", + "major": 4, + "minor": 15, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty16", + "type": "c", + "major": 4, + "minor": 16, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty17", + "type": "c", + "major": 4, + "minor": 17, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty18", + "type": "c", + "major": 4, + "minor": 18, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty19", + "type": "c", + "major": 4, + "minor": 19, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty2", + "type": "c", + "major": 4, + "minor": 2, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty20", + "type": "c", + "major": 4, + "minor": 20, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty21", + "type": "c", + "major": 4, + "minor": 21, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty22", + "type": "c", + "major": 4, + "minor": 22, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty23", + "type": "c", + "major": 4, + "minor": 23, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty24", + "type": "c", + "major": 4, + "minor": 24, + "fileMode": 8592, + "uid": 0, + "gid": 
5 + }, + { + "path": "/dev/tty25", + "type": "c", + "major": 4, + "minor": 25, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty26", + "type": "c", + "major": 4, + "minor": 26, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty27", + "type": "c", + "major": 4, + "minor": 27, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty28", + "type": "c", + "major": 4, + "minor": 28, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty29", + "type": "c", + "major": 4, + "minor": 29, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty3", + "type": "c", + "major": 4, + "minor": 3, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty30", + "type": "c", + "major": 4, + "minor": 30, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty31", + "type": "c", + "major": 4, + "minor": 31, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty32", + "type": "c", + "major": 4, + "minor": 32, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty33", + "type": "c", + "major": 4, + "minor": 33, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty34", + "type": "c", + "major": 4, + "minor": 34, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty35", + "type": "c", + "major": 4, + "minor": 35, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty36", + "type": "c", + "major": 4, + "minor": 36, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty37", + "type": "c", + "major": 4, + "minor": 37, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty38", + "type": "c", + "major": 4, + "minor": 38, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty39", + "type": "c", + "major": 4, + "minor": 39, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty4", + "type": "c", + "major": 4, + "minor": 4, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty40", + "type": "c", + "major": 4, + "minor": 40, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty41", + "type": "c", + "major": 4, + "minor": 41, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty42", + "type": "c", + "major": 4, + "minor": 42, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty43", + "type": "c", + "major": 4, + "minor": 43, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty44", + "type": "c", + "major": 4, + "minor": 44, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty45", + "type": "c", + "major": 4, + "minor": 45, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty46", + "type": "c", + "major": 4, + "minor": 46, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty47", + "type": "c", + "major": 4, + "minor": 47, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty48", + "type": "c", + "major": 4, + "minor": 48, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty49", + "type": "c", + "major": 4, + "minor": 49, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty5", + "type": "c", + "major": 4, + "minor": 5, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty50", + "type": "c", + "major": 4, + "minor": 50, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty51", + "type": "c", + "major": 4, + "minor": 51, + 
"fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty52", + "type": "c", + "major": 4, + "minor": 52, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty53", + "type": "c", + "major": 4, + "minor": 53, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty54", + "type": "c", + "major": 4, + "minor": 54, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty55", + "type": "c", + "major": 4, + "minor": 55, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty56", + "type": "c", + "major": 4, + "minor": 56, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty57", + "type": "c", + "major": 4, + "minor": 57, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty58", + "type": "c", + "major": 4, + "minor": 58, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty59", + "type": "c", + "major": 4, + "minor": 59, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty6", + "type": "c", + "major": 4, + "minor": 6, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty60", + "type": "c", + "major": 4, + "minor": 60, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty61", + "type": "c", + "major": 4, + "minor": 61, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty62", + "type": "c", + "major": 4, + "minor": 62, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty63", + "type": "c", + "major": 4, + "minor": 63, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty7", + "type": "c", + "major": 4, + "minor": 7, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty8", + "type": "c", + "major": 4, + "minor": 8, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/tty9", + "type": "c", + "major": 4, + "minor": 9, + "fileMode": 8592, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/ttyS0", + "type": "c", + "major": 4, + "minor": 64, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS1", + "type": "c", + "major": 4, + "minor": 65, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS10", + "type": "c", + "major": 4, + "minor": 74, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS11", + "type": "c", + "major": 4, + "minor": 75, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS12", + "type": "c", + "major": 4, + "minor": 76, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS13", + "type": "c", + "major": 4, + "minor": 77, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS14", + "type": "c", + "major": 4, + "minor": 78, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS15", + "type": "c", + "major": 4, + "minor": 79, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS16", + "type": "c", + "major": 4, + "minor": 80, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS17", + "type": "c", + "major": 4, + "minor": 81, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS18", + "type": "c", + "major": 4, + "minor": 82, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS19", + "type": "c", + "major": 4, + "minor": 83, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS2", + "type": "c", + "major": 4, + "minor": 66, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": 
"/dev/ttyS20", + "type": "c", + "major": 4, + "minor": 84, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS21", + "type": "c", + "major": 4, + "minor": 85, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS22", + "type": "c", + "major": 4, + "minor": 86, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS23", + "type": "c", + "major": 4, + "minor": 87, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS24", + "type": "c", + "major": 4, + "minor": 88, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS25", + "type": "c", + "major": 4, + "minor": 89, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS26", + "type": "c", + "major": 4, + "minor": 90, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS27", + "type": "c", + "major": 4, + "minor": 91, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS28", + "type": "c", + "major": 4, + "minor": 92, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS29", + "type": "c", + "major": 4, + "minor": 93, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS3", + "type": "c", + "major": 4, + "minor": 67, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS30", + "type": "c", + "major": 4, + "minor": 94, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS31", + "type": "c", + "major": 4, + "minor": 95, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS4", + "type": "c", + "major": 4, + "minor": 68, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS5", + "type": "c", + "major": 4, + "minor": 69, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS6", + "type": "c", + "major": 4, + "minor": 70, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS7", + "type": "c", + "major": 4, + "minor": 71, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS8", + "type": "c", + "major": 4, + "minor": 72, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyS9", + "type": "c", + "major": 4, + "minor": 73, + "fileMode": 8624, + "uid": 0, + "gid": 20 + }, + { + "path": "/dev/ttyprintk", + "type": "c", + "major": 5, + "minor": 3, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/uhid", + "type": "c", + "major": 10, + "minor": 239, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/uinput", + "type": "c", + "major": 10, + "minor": 223, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/urandom", + "type": "c", + "major": 1, + "minor": 9, + "fileMode": 8630, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/userio", + "type": "c", + "major": 10, + "minor": 240, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/vcs", + "type": "c", + "major": 7, + "minor": 0, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcs1", + "type": "c", + "major": 7, + "minor": 1, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcs2", + "type": "c", + "major": 7, + "minor": 2, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcs3", + "type": "c", + "major": 7, + "minor": 3, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcs4", + "type": "c", + "major": 7, + "minor": 4, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcs5", + "type": "c", + "major": 7, + 
"minor": 5, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcs6", + "type": "c", + "major": 7, + "minor": 6, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcsa", + "type": "c", + "major": 7, + "minor": 128, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcsa1", + "type": "c", + "major": 7, + "minor": 129, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcsa2", + "type": "c", + "major": 7, + "minor": 130, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcsa3", + "type": "c", + "major": 7, + "minor": 131, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcsa4", + "type": "c", + "major": 7, + "minor": 132, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcsa5", + "type": "c", + "major": 7, + "minor": 133, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vcsa6", + "type": "c", + "major": 7, + "minor": 134, + "fileMode": 8624, + "uid": 0, + "gid": 5 + }, + { + "path": "/dev/vfio/vfio", + "type": "c", + "major": 10, + "minor": 196, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/vga_arbiter", + "type": "c", + "major": 10, + "minor": 63, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/vhci", + "type": "c", + "major": 10, + "minor": 137, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/vhost-net", + "type": "c", + "major": 10, + "minor": 238, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/vmbus/hv_kvp", + "type": "c", + "major": 10, + "minor": 55, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/vmbus/hv_vss", + "type": "c", + "major": 10, + "minor": 54, + "fileMode": 8576, + "uid": 0, + "gid": 0 + }, + { + "path": "/dev/zero", + "type": "c", + "major": 1, + "minor": 5, + "fileMode": 8630, + "uid": 0, + "gid": 0 + } + ] + } +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/sh.json b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/sh.json new file mode 100644 index 00000000..ea896e48 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/assets/samples/from-docker-linux/sh.json @@ -0,0 +1,2259 @@ +{ + "ociVersion": "1.0.1", + "process": { + "terminal": true, + "user": { + "uid": 0, + "gid": 0 + }, + "args": [ + "sh" + ], + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=51cbacdfc638", + "TERM=xterm" + ], + "cwd": "/", + "capabilities": { + "bounding": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "effective": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + 
"CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE" + ] + }, + "apparmorProfile": "docker-default", + "oomScoreAdj": 0 + }, + "root": { + "path": "/var/lib/docker/overlay2/d245ac747197ec8b9323b4ff0c0467537a5c5dd9faaa334b2b9dfce1df64aede/merged" + }, + "hostname": "51cbacdfc638", + "mounts": [ + { + "destination": "/proc", + "type": "proc", + "source": "proc", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/dev", + "type": "tmpfs", + "source": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + }, + { + "destination": "/dev/pts", + "type": "devpts", + "source": "devpts", + "options": [ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ] + }, + { + "destination": "/sys", + "type": "sysfs", + "source": "sysfs", + "options": [ + "nosuid", + "noexec", + "nodev", + "ro" + ] + }, + { + "destination": "/sys/fs/cgroup", + "type": "cgroup", + "source": "cgroup", + "options": [ + "ro", + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/dev/mqueue", + "type": "mqueue", + "source": "mqueue", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/etc/resolv.conf", + "type": "bind", + "source": "/var/lib/docker/containers/51cbacdfc638d5fefc87ca6e3e129ce24f8aa7d80b659b173ce268d97106860a/resolv.conf", + "options": [ + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hostname", + "type": "bind", + "source": "/var/lib/docker/containers/51cbacdfc638d5fefc87ca6e3e129ce24f8aa7d80b659b173ce268d97106860a/hostname", + "options": [ + "rbind", + "rprivate" + ] + }, + { + "destination": "/etc/hosts", + "type": "bind", + "source": "/var/lib/docker/containers/51cbacdfc638d5fefc87ca6e3e129ce24f8aa7d80b659b173ce268d97106860a/hosts", + "options": [ + "rbind", + "rprivate" + ] + }, + { + "destination": "/dev/shm", + "type": "bind", + "source": "/var/lib/docker/containers/51cbacdfc638d5fefc87ca6e3e129ce24f8aa7d80b659b173ce268d97106860a/mounts/shm", + "options": [ + "rbind", + "rprivate" + ] + } + ], + "hooks": { + "prestart": [ + { + "path": "/usr/bin/dockerd", + "args": [ + "libnetwork-setkey", + "51cbacdfc638d5fefc87ca6e3e129ce24f8aa7d80b659b173ce268d97106860a", + "6f39a30d5033cbfcffe8eab1fb7d9f1fdbe21374b9e989253b0201046224bbe1" + ] + } + ] + }, + "linux": { + "resources": { + "devices": [ + { + "allow": false, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 5, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 3, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 9, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 8, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 5, + "minor": 0, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 5, + "minor": 1, + "access": "rwm" + }, + { + "allow": false, + "type": "c", + "major": 10, + "minor": 229, + "access": "rwm" + } + ], + "memory": {}, + "cpu": { + "shares": 0 + }, + "pids": { + "limit": 0 + }, + "blockIO": { + "weight": 0 + } + }, + "cgroupsPath": "/docker/51cbacdfc638d5fefc87ca6e3e129ce24f8aa7d80b659b173ce268d97106860a", + "namespaces": [ + { + "type": "mount" + }, + { + "type": "network" + }, + { + "type": "uts" + }, + { + "type": "pid" + }, + { + "type": "ipc" + } + ], + "seccomp": { + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": [ + "SCMP_ARCH_X86_64", + 
"SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ], + "syscalls": [ + { + "names": [ + "accept" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "accept4" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "access" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "adjtimex" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "alarm" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "bind" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "brk" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "capget" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "capset" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "chdir" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "chmod" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "chown" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "chown32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clock_getres" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clock_gettime" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clock_nanosleep" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "close" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "connect" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "copy_file_range" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "creat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "dup" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "dup2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "dup3" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "epoll_create" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "epoll_create1" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "epoll_ctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "epoll_ctl_old" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "epoll_pwait" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "epoll_wait" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "epoll_wait_old" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "eventfd" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "eventfd2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "execve" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "execveat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "exit" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "exit_group" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "faccessat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fadvise64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fadvise64_64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fallocate" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fanotify_mark" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fchdir" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fchmod" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fchmodat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fchown" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fchown32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fchownat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fcntl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fcntl64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fdatasync" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fgetxattr" + ], + "action": 
"SCMP_ACT_ALLOW" + }, + { + "names": [ + "flistxattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "flock" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fork" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fremovexattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fsetxattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fstat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fstat64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fstatat64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fstatfs" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fstatfs64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "fsync" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ftruncate" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ftruncate64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "futex" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "futimesat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getcpu" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getcwd" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getdents" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getdents64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getegid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getegid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "geteuid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "geteuid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getgid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getgid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getgroups" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getgroups32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getitimer" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getpeername" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getpgid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getpgrp" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getpid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getppid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getpriority" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getrandom" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getresgid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getresgid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getresuid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getresuid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getrlimit" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "get_robust_list" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getrusage" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getsid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getsockname" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getsockopt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "get_thread_area" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "gettid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "gettimeofday" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getuid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getuid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "getxattr" + ], + "action": 
"SCMP_ACT_ALLOW" + }, + { + "names": [ + "inotify_add_watch" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "inotify_init" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "inotify_init1" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "inotify_rm_watch" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "io_cancel" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ioctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "io_destroy" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "io_getevents" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ioprio_get" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ioprio_set" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "io_setup" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "io_submit" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ipc" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "kill" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lchown" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lchown32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lgetxattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "link" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "linkat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "listen" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "listxattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "llistxattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "_llseek" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lremovexattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lseek" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lsetxattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lstat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "lstat64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "madvise" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "memfd_create" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mincore" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mkdir" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mkdirat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mknod" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mknodat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mlock" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mlock2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mlockall" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mmap" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mmap2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mprotect" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mq_getsetattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mq_notify" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mq_open" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mq_timedreceive" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mq_timedsend" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mq_unlink" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "mremap" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "msgctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "msgget" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "msgrcv" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + 
"names": [ + "msgsnd" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "msync" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "munlock" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "munlockall" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "munmap" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "nanosleep" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "newfstatat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "_newselect" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "open" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "openat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pause" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pipe" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pipe2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "poll" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ppoll" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "prctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pread64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "preadv" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "preadv2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "prlimit64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pselect6" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pwrite64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pwritev" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "pwritev2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "read" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "readahead" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "readlink" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "readlinkat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "readv" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "recv" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "recvfrom" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "recvmmsg" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "recvmsg" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "remap_file_pages" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "removexattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rename" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "renameat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "renameat2" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "restart_syscall" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rmdir" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_sigaction" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_sigpending" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_sigprocmask" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_sigqueueinfo" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_sigreturn" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_sigsuspend" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_sigtimedwait" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "rt_tgsigqueueinfo" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_getaffinity" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_getattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_getparam" + ], + "action": "SCMP_ACT_ALLOW" + }, 
+ { + "names": [ + "sched_get_priority_max" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_get_priority_min" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_getscheduler" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_rr_get_interval" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_setaffinity" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_setattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_setparam" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_setscheduler" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sched_yield" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "seccomp" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "select" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "semctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "semget" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "semop" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "semtimedop" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "send" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sendfile" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sendfile64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sendmmsg" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sendmsg" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sendto" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setfsgid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setfsgid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setfsuid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setfsuid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setgid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setgid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setgroups" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setgroups32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setitimer" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setpgid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setpriority" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setregid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setregid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setresgid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setresgid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setresuid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setresuid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setreuid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setreuid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setrlimit" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "set_robust_list" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setsid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setsockopt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "set_thread_area" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "set_tid_address" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setuid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setuid32" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "setxattr" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "shmat" + ], + "action": "SCMP_ACT_ALLOW" 
+ }, + { + "names": [ + "shmctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "shmdt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "shmget" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "shutdown" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sigaltstack" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "signalfd" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "signalfd4" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sigreturn" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "socketcall" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "socketpair" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "splice" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "stat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "stat64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "statfs" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "statfs64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "symlink" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "symlinkat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sync" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sync_file_range" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "syncfs" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "sysinfo" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "syslog" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "tee" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "tgkill" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "time" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timer_create" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timer_delete" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timerfd_create" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timerfd_gettime" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timerfd_settime" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timer_getoverrun" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timer_gettime" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "timer_settime" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "times" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "tkill" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "truncate" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "truncate64" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "ugetrlimit" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "umask" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "uname" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "unlink" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "unlinkat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "utime" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "utimensat" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "utimes" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "vfork" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "vmsplice" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "wait4" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "waitid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "waitpid" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "write" + 
], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "writev" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW" + } + ] + }, + "maskedPaths": [ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware" + ], + "readonlyPaths": [ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ] + } +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/lcow_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/lcow_test.go new file mode 100644 index 00000000..6ca4d729 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/lcow_test.go @@ -0,0 +1,224 @@ +// +build functional lcow + +package functional + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcsoci" + "github.com/Microsoft/hcsshim/internal/lcow" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" +) + +// TestLCOWUVMNoSCSINoVPMemInitrd starts an LCOW utility VM without a SCSI controller and +// no VPMem device. Uses initrd. +func TestLCOWUVMNoSCSINoVPMemInitrd(t *testing.T) { + opts := uvm.NewDefaultOptionsLCOW(t.Name(), "") + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 0 + opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + opts.RootFSFile = uvm.InitrdFile + + testLCOWUVMNoSCSISingleVPMem(t, opts, fmt.Sprintf("Command line: initrd=/%s", opts.RootFSFile)) +} + +// TestLCOWUVMNoSCSISingleVPMemVHD starts an LCOW utility VM without a SCSI controller and +// only a single VPMem device. 
Uses VPMEM VHD +func TestLCOWUVMNoSCSISingleVPMemVHD(t *testing.T) { + opts := uvm.NewDefaultOptionsLCOW(t.Name(), "") + opts.SCSIControllerCount = 0 + opts.VPMemDeviceCount = 1 + opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + opts.RootFSFile = uvm.VhdFile + + testLCOWUVMNoSCSISingleVPMem(t, opts, `Command line: root=/dev/pmem0 init=/init`) +} + +func testLCOWUVMNoSCSISingleVPMem(t *testing.T, opts *uvm.OptionsLCOW, expected string) { + testutilities.RequiresBuild(t, osversion.RS5) + lcowUVM := testutilities.CreateLCOWUVMFromOpts(t, opts) + defer lcowUVM.Close() + out, err := exec.Command(`hcsdiag`, `exec`, `-uvm`, lcowUVM.ID(), `dmesg`).Output() // TODO: Move the CreateProcess. + if err != nil { + t.Fatal(string(err.(*exec.ExitError).Stderr)) + } + if !strings.Contains(string(out), expected) { + t.Fatalf("Expected dmesg output to have %q: %s", expected, string(out)) + } +} + +// TestLCOWTimeUVMStartVHD starts/terminates a utility VM booting from VPMem- +// attached root filesystem a number of times. +func TestLCOWTimeUVMStartVHD(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + + testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeVHD) +} + +// TestLCOWUVMStart_KernelDirect_VHD starts/terminates a utility VM booting from +// VPMem- attached root filesystem a number of times starting from the Linux +// Kernel directly and skipping EFI. +func TestLCOWUVMStart_KernelDirect_VHD(t *testing.T) { + testutilities.RequiresBuild(t, 18286) + + testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeVHD) +} + +// TestLCOWTimeUVMStartInitRD starts/terminates a utility VM booting from initrd- +// attached root file system a number of times. +func TestLCOWTimeUVMStartInitRD(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + + testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeInitRd) +} + +// TestLCOWUVMStart_KernelDirect_InitRd starts/terminates a utility VM booting +// from initrd- attached root file system a number of times starting from the +// Linux Kernel directly and skipping EFI. 
+func TestLCOWUVMStart_KernelDirect_InitRd(t *testing.T) { + testutilities.RequiresBuild(t, 18286) + + testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeInitRd) +} + +func testLCOWTimeUVMStart(t *testing.T, kernelDirect bool, rfsType uvm.PreferredRootFSType) { + for i := 0; i < 3; i++ { + opts := uvm.NewDefaultOptionsLCOW(t.Name(), "") + opts.KernelDirect = kernelDirect + opts.VPMemDeviceCount = 32 + opts.PreferredRootFSType = rfsType + switch opts.PreferredRootFSType { + case uvm.PreferredRootFSTypeInitRd: + opts.RootFSFile = uvm.InitrdFile + case uvm.PreferredRootFSTypeVHD: + opts.RootFSFile = uvm.VhdFile + } + + lcowUVM := testutilities.CreateLCOWUVMFromOpts(t, opts) + lcowUVM.Close() + } +} + +func TestLCOWSimplePodScenario(t *testing.T) { + t.Skip("Doesn't work quite yet") + testutilities.RequiresBuild(t, osversion.RS5) + alpineLayers := testutilities.LayerFolders(t, "alpine") + + cacheDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(cacheDir) + cacheFile := filepath.Join(cacheDir, "cache.vhdx") + + // This is what gets mounted into /tmp/scratch + uvmScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(uvmScratchDir) + uvmScratchFile := filepath.Join(uvmScratchDir, "uvmscratch.vhdx") + + // Scratch for the first container + c1ScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(c1ScratchDir) + c1ScratchFile := filepath.Join(c1ScratchDir, "sandbox.vhdx") + + // Scratch for the second container + c2ScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(c2ScratchDir) + c2ScratchFile := filepath.Join(c2ScratchDir, "sandbox.vhdx") + + lcowUVM := testutilities.CreateLCOWUVM(t, "uvm") + defer lcowUVM.Close() + + // Populate the cache and generate the scratch file for /tmp/scratch + if err := lcow.CreateScratch(lcowUVM, uvmScratchFile, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil { + t.Fatal(err) + } + if _, _, err := lcowUVM.AddSCSI(uvmScratchFile, `/tmp/scratch`, false); err != nil { + t.Fatal(err) + } + + // Now create the first containers sandbox, populate a spec + if err := lcow.CreateScratch(lcowUVM, c1ScratchFile, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil { + t.Fatal(err) + } + c1Spec := testutilities.GetDefaultLinuxSpec(t) + c1Folders := append(alpineLayers, c1ScratchDir) + c1Spec.Windows.LayerFolders = c1Folders + c1Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "one"} + c1Opts := &hcsoci.CreateOptions{ + Spec: c1Spec, + HostingSystem: lcowUVM, + } + + // Now create the second containers sandbox, populate a spec + if err := lcow.CreateScratch(lcowUVM, c2ScratchFile, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil { + t.Fatal(err) + } + c2Spec := testutilities.GetDefaultLinuxSpec(t) + c2Folders := append(alpineLayers, c2ScratchDir) + c2Spec.Windows.LayerFolders = c2Folders + c2Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "two"} + c2Opts := &hcsoci.CreateOptions{ + Spec: c2Spec, + HostingSystem: lcowUVM, + } + + // Create the two containers + c1hcsSystem, c1Resources, err := CreateContainerTestWrapper(c1Opts) + if err != nil { + t.Fatal(err) + } + c2hcsSystem, c2Resources, err := CreateContainerTestWrapper(c2Opts) + if err != nil { + t.Fatal(err) + } + + // Start them. 
In the UVM, they'll be in the created state from runc's perspective after this, e.g.: + // # runc list + //ID PID STATUS BUNDLE CREATED OWNER + //3a724c2b-f389-5c71-0555-ebc6f5379b30 138 running /run/gcs/c/1 2018-06-04T21:23:39.1253911Z root + //7a8229a0-eb60-b515-55e7-d2dd63ffae75 158 created /run/gcs/c/2 2018-06-04T21:23:39.4249048Z root + if err := c1hcsSystem.Start(); err != nil { + t.Fatal(err) + } + defer hcsoci.ReleaseResources(c1Resources, lcowUVM, true) + + if err := c2hcsSystem.Start(); err != nil { + t.Fatal(err) + } + defer hcsoci.ReleaseResources(c2Resources, lcowUVM, true) + + // Start the init process in each container and grab its stdout, comparing to expected + runInitProcess(t, c1hcsSystem, "hello lcow container one") + runInitProcess(t, c2hcsSystem, "hello lcow container two") + +} + +// Helper to run the init process in an LCOW container; verify it exits with exit +// code 0; verify stderr is empty; check output is as expected. +func runInitProcess(t *testing.T, s *hcs.System, expected string) { + var outB, errB bytes.Buffer + p, bc, err := lcow.CreateProcess(&lcow.ProcessOptions{ + HCSSystem: s, + Stdout: &outB, + Stderr: &errB, + CopyTimeout: 30 * time.Second, + }) + if err != nil { + t.Fatal(err) + } + defer p.Close() + if bc.Err != 0 { + t.Fatalf("got %d bytes on stderr: %s", bc.Err, errB.String()) + } + if strings.TrimSpace(outB.String()) != expected { + t.Fatalf("got %q (%d) expecting %q", outB.String(), bc.Out, expected) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/manifest/manifest.go b/vendor/github.com/Microsoft/hcsshim/test/functional/manifest/manifest.go new file mode 100644 index 00000000..38dc837c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/manifest/manifest.go @@ -0,0 +1,4 @@ +package manifest + +// This is so that tests can include the .syso to manifest them to pick up the right Windows build +// TODO: Auto-generation of the .syso through rsrc or similar.
diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/manifest/rsrc_amd64.syso b/vendor/github.com/Microsoft/hcsshim/test/functional/manifest/rsrc_amd64.syso new file mode 100644 index 0000000000000000000000000000000000000000..0e9857245ba25586643b662700494e852cfe75ec GIT binary patch literal 372470 zcmeEP2Y405+TL?cLa|{7mEKY8QWWpi`+Kb@7O-FSdhNaU0v4oqkzQ0ldQU=40)fyw z>6PBAs0bwUzwex#WOH)T6G-{a^Sqhao!!~l_bcnA6yoHsZ#?jZ!W;P=KT-e ze?jl{{<$4xw<7x`QvS{YZnv%rXFl^6XnC;F+Oj&GjkJo(>U18rk)DtARirhL4n~T^ z@vEe-`zg+IV{c!itY3(>`!&#dHM2Tv@Na?E`I*&O}tw=CD#Pc8S1?=4SAH!EO%FDqbSA1ffdpA``CqvcuhljVsVWVx3Ow#0W7 zgjhLN7$_nGQQ;rD;?@p#CCAKk$F1n$iCfh_ASr5k!0zAo=EufePr5C4H_Hkbb*&XR zzJpb1RBJ1+r^gC-*Rnh}Rkqw~*XQ4+r+;g?=5+V^hS0uNKv+M^13%%0zi=%YV7ZnK z%70x&q(x2_2Ffi1F-yL3$F2D(FganNC#RcS+|vqJHKy@Z>t{c5*QU@9+Q%&K{ba(X z@lPa0Pk$kPRsWu`%ezjEiTo}!CbILg*kxVT#w_a^6|=m1e9Y3$(b%^xZbi>E@vHi* zSTp^h1z~+X!x#3px-aPE`gm>+>%Xw)Ct%-?EgoP!xb!FM&gDa`7VAe@wGV|_0XenD z?4SI#<%UlPj2LJIAYRCik20r9DhyOe24YwAa3@92a3{wGd6Hv8ve-<-kju{A9RBg8 zi5tgW8NKM^uVNy9n4GwIQb8lNRdU=Ml@cGSQW6)b)TG5ggaeDQ zKU{4|S+2HhS*_yMjZw=cJ)q|IJ73M|W~n*dT>w){!3U^$J-vW0P{1G9-a5CtwH)aj z_=aiW{jEWZf3)6N)Zc1_?dq#XS{EHyWL5GN;H=I%KJcJU_k!M*d&xkF5qvq4?TX1j z?26u=gbiae`qbzpU+VF7RQNmB#IEf7N^;DsZ{ybvi%Q%y@l;B}LfH0v8w*rQLYQsa zOVVNICsPv_ok&RxJCTwQ3M}ODX&H#0pN3c>eM{={^n^_l(pOA*IBjmvz_g&wR$53` zD-AY(%En2gCv2R=T)-(H9mwzvD9e_IPe_Ap?+fi?ErqY>j{g5O#0~A&jIe4`vMYvW zwRzOx#@Hb+QsRS(srVK6uH#_ysVOTxdygyEhTzAvy>FcR?1f32r?iM$-G5-><|%8@ zj~`5mnFSjU_1ZZ4bLL_5u<0<^cGyX5)5g?d?!W7IUK94w|F?_D1TxOqbFq|K9(b?lxH z3VVj_B2GsPPa9`n&VD?rO*`#7r+hr)g@roak6klDEgp5fw)>E-uy<|u+57Pv>_1z7 zpTlwV1@sX*eqh|-dmMa$!aT!v#0Vqc4?e{FKnq_92Y0hPeCFW0U$%vTvdBREy5XMP z2UA@!OS|g6J|?15o%nSl-ouzZE+ua6@#OeBUNJ547GIZKegS5bfeuf)-U!xzc6*u4{!`Y znDf&)W`Hl)0sMkz2QRJ~VVz^-41eL`dIZch%KV`rMbc8qKveigZmh%6@$1xAEqCnl z9xW5rkDitsGdqp(IOgeS^X%h`&*qK4pzWuwP_b)QxigU zth#yOo6Qo|kD8bq6Z9wgdBpeOw0n%{i?OdacFzBt;9*Qqp_EK+w$w2Ywdf-^p4aKQy$!S5 zUlzZ1Xm`Z7CATeo<{cr}OCp`23O3 z$G{h)V;ry$^94^tO|;H7iV)TpbB6Nlz|bM-X<}eQ(4#Jn@p-x5+FS1ErQbY(G5%IQ zr}xJBC2aSm-%nYg)-U)7cJKB+gDX8_{hVyOp!>}nBJa<{1hMb~Uo0QyeV#bx9jr~p z8Y9FB5)Ygv{jbQE*|gwQ*QWWeXxm>mYtAS$Ax)}u`eKDZ)o^-rtpaX#(7#AE$rJUjG)YX?H{&d+oeHn_fbp1-HZ zJU7dhnmCzrXuj~i_mlAqzOeRJTF#^W^Z5b6X9kBjKd^F$bw0IV!snP9z&v5`YJ}8Q zQ5b-|>-qi7i$8CL=ljWaywB%(#cKP;&QY%&ON6N{N#SZ+$`Z9>%Tl##>r%CM&t`>p z2Nd*KEgl@GA_lv2#Nt72MJ)Qst-=SoRoDQxTG-#E7WQ?i`Mq8GJwM|km>M79+*ii( z^aC6lc-Iv5vJ&uZf~Tpc!CfpDe1aSCfnJkUQFK@Cv+xXHjXu7;5u|NDYDveZ6Qiad z!yF&ueLmAGhIrrf>sYI=?L0#5+#0ENZ(phQ?OLM_?p?1A?c1mh@85)Uz54w~vf2?n zQbi6A06ZD#QskZRma#lL^l`&*yp4z<9_=H-e{`$R{%*CP4}1fjyK^m0hCi^M!IdFj zp#1>aul)eV01NQG;kDF+afk)r10)tGyjfI;Gx4sy3%0IpAM5zu!5Ti5lC&88J>Rb_ zyzQGlJ~c5EHosWy+P2(b>zmY(15v`hnl^eDH@2zgShMz}V(HhY|}^ z2x~6*O(w_9)qOzi#`H#VP{vhP27I7P)SdPOb%$wI$gsrYQ)5esDTAAWoS#}SMXICdyw@d2M3 z>UR$C%y0v~X?O=Uf^!1+&XL3d1vig!eMV~XGTkrW8#~`5$Kf5lq$Q^?&yR0y@Vh#` z-`2_cogJ2^@SQ&ITK{y|J$yu18piKwu=!Jm_HR6eKK>N?bsLQDy)k}ff1hdlhxctz zM-L^a-O1Bc6+M}6vqT+Zh-Aghwc9m zH_5w>kZXzY>`-EXa&6W5ySapQziK^5ik{h-F+aY=k(L-WMI~KZ9E?7R{E5T+Hl0LydR9Mx*#A(x+7#06^!5FIY(GnEfO6vtm@XZPu>$577xr^| zeS!VFp_KXpzH5}m_yzLt{V&Yu&7eYOkI7Ao35@-Zn!}z0*SJZLK^}#*fX9VS1x#i|^WB|{HJfZmR{hZELz*MX$&+)(_ z79>Qxs_R}e{o!-s7QcIc>ZTzt?%TEMxxL9#2keX+cVz$0WhdYd(hu!fg}y&QZ4A0A zJz|h`665|ff3{cJ_BnlljtekGV1LUs2b@1FMR9?iA7tAwH#pMF4ZZ25 z(YBBA-?Ml&JQcE^fVglf^kO>thPjw8S^})Wz8E0E##W^Kp51norQrE_;G z*bOA&8T~Shi)SGop1x#Az_g`91Eww?R_V8uBPtJGJ+ktII-EX`R@s#CRF{Em%<--G?jvN*cW80e_KfpP`Gczxkmk%)I#uspWun^+|_PeF% zd7}0Kkmp`J7rcjB&F2I1jIiw6cW!rJz9E7B|8~e;&kM1C$_Y+=HTlp1F3ihkuBkEl 
zo@+YPa|-AD9XTxU&lSTfB`hBv&<%d#`BfvUKE3Ie^DbwbH~f6A_6hJE+CMEF5?B{` z&i!ZDzxP=n<_7p&0Bz9QSRa(@wevB*SCZ$O@d4Kugke46yk4%-;s-c4$nOuOVJ!GE zWia~(_+OuIm~+4U%02`5zjoTbHvDW|fM-Bav}f4;-vIjpC(Jq$v&P&zhm3FEFB;@d zgFbUy?u%2{chV2I-~Tv%s3%>o?HlGvhMg~3IkIy9Wy32qUN`FO8e}_iXkd#B`?qa; z(LhTr!LzufBdWp&1z;W?+e5Kkoa=*1&|m170Dg<>lex}_e%F>mjy{;jlpwfPK=%Wf z6Zn?A<2&W<+{TAIMt)_FfuOG5c^=5Qx2;b)2Fy#hsGswCy7U@3-}SVx?Sl5b?8^!I ziW30+gQ+8(_Kw&;b=h#wH;Dgt>9XO0>3Tk3coh{J`n1{?KTI9mI#umU8m$r{UQx?N zT?n60%xeTQ+hp2Stf?6raK3PUpEK?U^qc_4Cm0V7OPu6gGbrB`D0?=ZQ;YGPd2dYp zlhp>}_cVT&Ixo9tx!3kBJ)dQC)?u&3WfP1KOq`Gr>w9fK0PAxtwc?i=YERNQy)JOo ziaBc5wBaf;Vx~HHAW0qCF-NT)-=xg>0N$VBbkR>9{VsyxJU_B2=3_F}3&96WOPp+- zOI_msLe5F2WPjOU!2B<3wcGL_?q6)%K4bjceSq%wOWgMT`4IX8#0K8^{|JorcSH|X zd-lYsZ$AH@y8qwTsQd4{Ufp-cHR^-69#zSStJVH3Gq7IpZ1224DW4bAe$6MoFn)k@ zg?uOJ3_d&5F(Kvx7pxoY^#P{uC>z=?t@LF8ah-m?i|4#gGRA}5=PT~#v7ECihM3=< zjpw}&V|uIuP-~~QRDb=Es(N*KN8NRMbM@Fmx2i`UY@^eA{_h&~#YfMoBY0mRdVUAB z81n(A<$VDk`}Jqjh1r+k2e8&y`vLms)4zs5_YLp?i#Ct3E~Gxm|2OuvsgFs2y9^tN%Q9yL#mQR;ok0TU7f8Zr16; zZCj}a@3~RMY+j(%)t9tO>7HaT-uhgC$tJT6eBh~h8n^ek*kMui3W#w65K{(ca0M8A%rjUO8 zH2MJ(2aKoM_j$!~;T>YXYq!dXSyY&2LQk}>#<>5f{Pz9qV+y>sr=x&%?i0#}3EgouL$!82N#B?2qG5OS?+S=jFFv_<==( zJo=r((-aGwVjqIFBVSPWgYC5>Wx)pB0(pRBw2yx+EeKI8Q}e?%SPd~>O? zeTOY@eNWW9_Uh=qrRvXPscO$Id{{@8|a`On*{RdRo+aBRL;{wLpBv$L9iE8^HMi z?l0}{0v7e&^K(As$LEFYqf2m1pnU-JDi!~iye?JWyjN1n%%H+H2haKSydUOiAF${B zaR1we|NI>@tw0)@ zdDc{6jjj~kIDE5?$uL(l&5rGIeCE%7qAc|NC22F|v2GI!%q_WC;3WGE=vXhRKE?zx z4k!gaV7dKW*OaN&*^qs-ZU1KeFWcN7Ww^LhkNe9?TvGblnHdXs-zO-*I6s@7_&GNn zeMcI{8W97n_EdeW6O#GBGwcKKO&=`-yuaNJKAGb{#{PcefA;BZYUp>i5f3olQz9Z1b3RkJ#G3c6M6>l5>|5F*b2)F{c6gUUqzN)}kz}Y2* z*Pg!SJo>VbUX|XM-PNsTcX6i!OtT>f<)H%uRruhN#Uj+77fQMfMX5ihwil(&l0G-5 zHaqvT4c_faYF^*KZ2I9(XY#S{WKcJ^3hr4+j{{zQ;+pe1v@1D%FQ6UK_Gp8&MdmLC zY66!4Rf{VIKwb5L=YT;#1aJ`G_bSqXzk%O@!@#j(0_!_!fx5^ zNe7;IsHGwv*Jy>bm3rb~fN4>IdU&m;9%-qb2E5nxUW30*S6d5r|pkppKdeiH1}-IIeqQ*@||&g2UnU?KkYVhaJHWuKiwX6A=P|4 z^ux%e!0CgnsO!G^ZS?dA$_Uzk6WnL?n8*5_U)XI^?{U-}BUbg!#T>DhC2%dSqlWW#ZzR3;SORnzz_pLh5HuHFL z$~KbYr)VV1Tgz`8fs9FVSb_hLiSjzMTGr{VBle z4iDTsA2vSm*Xbbi5{a*h-{UN9WI2ZcT zTFZ?2+`!{lPF?5uwEHI?Zmr(?Uklaw-5b=vFRxW&yEIp``(35NhBQ+vMm16E$9l1O zd}9?o!HW&!ny59uHo>>Mo2u}k&D8AvSE&izuT~?zxkmN*-8J?ZL{-!sF&1#^1ypwowS=5pKPVRdi_S#@3ZUFFP*PZGx}Vm77c5r zR{hdMtsB!sMWO9cXglwP`yz&;&A7+0U9MIGzqn3)^VSXOwg0r#_0o^Aoe#5}xR&;D zdGPA5jXaNan06Wto_?gYdh@v!>Pxh@=SSD8p&hSLzjeJ@&Fp)X3LV@`MS|1SXd~Oo zHj{I<-^gL*=qCE!f(Klw#&>J327i66>iYim>Z2ELQqOc~ogvqD_oBa`Ke1)*Y@=`9 z%PVvPZGI4a%>(z|nhqFUqFx^WqJc#~2rv~G2TZi{bC9k85&-u9=3JAq4r2qk*$K;N zQ#_CPCBR$26}Am(TeS0T?5C|}?Bo98`mtWS{w&w9_q9PfA297?Tg|X< z1DFkb4BP=U25KM0jk@jg5yEoUqgto-~mG*0{@8r}sZ+NF~ zy7w+`JoKOTwDp#%`-j)VRIX0dOloyPW_e0Ve>XkF*QI zY&%o4e;vvn05YzJd=`7;w!gNN)IP`c^;pB+{s1ZO&Gz6F6=d0Xz&`!1f`{ z()NUj-nrJzW8LS+O|&+}O|TSFZU6Xo8-0O-Ur_W5Tc>+qd$phg4b)cHE&I8l9k10k zZTcG2ujOh|_C4%3dwqPpT8!gqE7T!l;|Jz7(ED}=H-KK+z%e{7FYLiF8aPj%k8RpN z>!goxUgr?@(ciG|?epn%>V+p-={}t8V_W_C0q)bWE{=Kmsh044*Xll(_X_HxAWqg<);0V70=~9)@OVJud^rkG8Hzsn(Fg$ZS|iH zE%i9E{R7_kkL@MedHZ_x;uANho*&jz(|cX0*8UQhj`im%71%z*dW8ClG$S51`U`o{ z=4q2zbeZMOAIdpjcPG#XVE;#1(>9zo!hOc}2OzKK^pNrz0YI+Ta9*#Z^K1*im_T!a z^d-R9J@3KX$1dc*19a?S+J$Z2Qx=TxdWo(pYP_o!`utS(`#>5~_yMK;0enL2cuOTt zt_u0r&yYF$0fPR2A^P*L5qmT4V_ZyqrA{)|WE@QW{vEnO`}EZ_?ki51`kgw>`VY-- zp!+ShC3SiuHRs2x)EBR@&0ZTaHfD6R?cKMi=O1sS`_HY2DcEmQ&rSU%Eo$3MJtn13 zVJx#VsG$nN_~wmgU_)pNWS8NmjBldfegB0PYD%x>im?don|(9e$#$7Kom}iKA->JE z_RV2CYN{PGJ!-|sK=s4B4Kau0Z9nG;80WwDd~*feLwxDc_P%vG^b7qB^eruJf(xl@ 
z^~PU4`hSUh|4u^>=9;y=g9B}kOnqSVn)addM(>$o--Q6g=9I_ro@m=zF1W}1T&tvx*&EUb@j-pHZAMjORxQpuHvrU~JF(*bC^G390d6c|Myk+#6eZ{M_2icwirV0FR+B*vD;$ zUn>oHGj3s97!&9=m$EkI#1qGjTW4I7U)y);AY%YMca5}2F@V!1U?zpUeE8 zS4KN$Uk>}{c&_mBmveTd?0PWveD*K*i@ZREG)!~WCJ zhYN%Q>cA-*Cy^4|P6P7gCm9Fq4ypwo5Rj#xv31b7&peOrKzl@au{;YX@3x(v`vaGH zJcaZEU=u*z8FSI^X2C9u+c@v+eg4m9fP#w5!QL$LH#X1lVMF==`U9q>ukh;=7zgYL zrVj|rEeGL~hSATQotjoT_oKUab!L^3(4XHO4h}aWc=fIwjy8djPu>pMo{Xv#;9S3M1 z5RlV)Ak+UdZE5=ei2=&9Ly~(da|~?$&8m_*{k+CIXV+-6C!}_J%<=uaC%ASzVx-^q z;=K{C-Rt>dGfv3x1zsOeTaN{})*N>33ljtMw(Y-A&%4VUsi(2Q;G8i)j@UNyA`g5% zP$L&q_Z$toz1U(iT+3~)SDaLsdP|~Cwzs(Ijr`hjZJ%D<1Dh(gI@vaJ|AyNFr?a64 zvo8;TjR#;19|+qHJQmh6i`>y>y$!u-jIrZKhz%wHe|X1Ci~$+e0s*GFytw2c_2kUJ#>w6>8Pa+{cc3%yLm}}!&g%u} z@4_YrpBHd&ah*4T?m%bYheF_coYM>VZ`8B^t(&O7jeQ+nuP|-pJ$3=U+cd>hi|p(R zuUQS(=mc~D3e!%W!?yMX?lp|#8tnmcQk1rK!}%Wp=dfO+S!~u^t1v0-+_inyrEd5H z4{ST&Sa?={uyL#vuzkiwb#?|_{y)S8D=~lYryUc}4{)v^%{wPpQ$HJYybokz0rM>2 zCAN?DU66L-c+MNpPl!(_2yyv57P_QkGNk_k6p(JG%oh|V?Q^uOQ$wm%g)M9X6p&`8 z%ohZw?BnMo%sN}^4zBT@eVtQ)eF&J77Cd8yE|)U6p_opbyX#*bBT0 zTnDhNOuu9v_OIurVEUe; z*V|Iv)?aX)zkv&ZX+Tc=J&m$EfTMQ$B2u>Xd|(2w8_?geLt5hYZ`#ike!vYIM|@D< z+gJQzx#1rgVhk}7^M@E4=&=BOK-$hA?>a%)spnKN|OVSGnC z&KU&UV%meeo6L{r@!p=M9Ygk4C;!*gx-sKOGayYw9_%_EGHz z@P5E+c!s#2egOU$(88={|t{KW(}$@Dkw9{+DC_k3eN0 zH~VM*pOgLHgR-ju+CS4r0Av59rvLX@AnU^%c9APxa0~`xr?MZGn zUYIi4J!1wF4=_IqXvTISWh|imk#9R?+dl))F|mJ8V~_u7^J4%0RVBy0m=n}2fBJzO8!H7_7!doe$TY>p`!lhAfNO%dPS9RA$Z-I_Cs+^Pq_133K7c-f_Rp~Z z^TB}m_K?H@*~OM+kJ!K1e-?!(zy|(@f{y#KcIYE}{{Mv2_A~Ph@jcc$`n_S>Mt%G7 z;@WolfJ(d-TuXBm+=KHl>ZJTjXVg z-`^1{tWPjLUU)0FQy3gt=D4Wdmy81Th9{r``SOZ@k{{U zqb<^N!Tk6HGbg}fUtxQZW?92%?m<$qf3g39TY-76Y5UBJuEW^=cYEBQW8d%e1@r-! zH{e_V?bOWM<;SiIa*U~WAFk1ecZgKR0e(a+@xR3Xe$*n@U56Gl*D>|3;96hUww>`l zpwrfK%co2|%h)sL@6B^Ov#+Qr?f*EydZzPckjnQ*{76;oU+mwHTI9JaW{l;*TAyUw z#`D$pn`0RR>;EPqKM!yNYk(hF9a66OyAQb4&Ogunj`hE09UtGF`3B%RS+4(k z4B+}&K6mFkH*WxZFTf4_3~*g7*W$$i{?`B2#q~Y~xc;{aFapqP@{w}=Z)f0QfNN); z0=Qn7ZRNT_y@uFN{NE4P;`(H^sU2`5z%_l10IpBm&wE4q8?e#Nvz>#0bAa5||8f0v zPW;`EvKxTkf$IUTS2XMYen!4jtpCM&f8FNz$;5&Ye8fRUO z`M}R$myv*(w{!OWv~BJ;?*=e$zv#%j1klIcZ_eZUg3N3Cx8MKCcy|Z3u^s3L(580-TwmV+ z;Cr)|0<^CktP|;AK-)ah^?<+kfB0_Pdq9HSj(*6mw)64Ge+JMm&_8fJE!X|CedhwN zGw);n>9|gJ`}%AX-~ZwIf8+g@VAkjTKDM5*4#2S=>jL>aux40LKkvL; z!X$SU*ys(w`v9NcrvgU-W6w^X;4jbhe@Vb3;8~zHpl#lyY_t5bGXR}3_J14o4H$3Z z1kypke1Q8q0dYVI&<)rPtOfY}4VJTSqpmam0!1w360z7XQ@B=`bTDa`jjJBHhe+J@OivaeSJ%9}W{npn2*U?7;^a+0f zvw$A~wr>Cs$~^49HQS2yzkI)sHqUo?`MnahX$mkJpkEjQ%mLWG?*YEo%=a0XXFJyc zJpi7kzjwuYaSguz%WHlI!~!XRKmI0S|0saGGv&L&&hvI+ySiPcmKpP>`^RsD@S2$S z8)@5gLC5~K{nuI$(%@Xx`e)ee*#Q5y%_G2@Ku4fA@S_cWE8shT-xGKg z;IUPlTwt5;1IfIfcG=Adn6m2lfNhMcNsUVg3m4hn;6R&-vTN zQ@m74@Of2W3!wmQop!<4{tri(DFfzL1DD!$o)P=!w}N>6 zwZB+Z_Jq`l^tay6(a-zq`?(MCKl=ZmS~`woeMyr8^*g+<*CM^kYtFaREIx(TkzdXX zK)22Ut^yc)5?2C^9r;GcH?v{#O|i|qIhM!s-L><1rR1Ps6_U_WcGKuz zIImwE$cVkrzg`B^-aOTFg*SEA%*=b32TXaEd@Ym_Jg*L*zkNl@_J`Lx7pS`*-|PZv zB468^uT?8+z9zPLj4sD<8F}v0=OON|%sOMH2YPS~=QY`8XIs$@XZv`pKA!t|tZ8dq z?APy~uwL-#Dke^|^^>gsbp8;tYy00d`-)buTiQRy@|o}Yb|fY)PNdHtN)$9DR*mGcoiUdOgbb$iQ; z+!6EZ`@x#uTfFvPn7RK3?EAN-PCL6MdD%Ym;xq2o%8PtT+kH^~fz1HtuHOTW0SkaH z0nW{h1vo!Oo9P0~0R9B{OrOsdwgUXV59b;es_0vFTmk!i*S6&@NI56Z`L{2DRlrW* zE#MHqxyNq-&L#c^@Ob(FK2w;@JnUc3|HJ-k;u@T1iv&Ig*e1^1^H~AyoX;0HC(k*? 
z4}tZ-F5m-T8?YSUeepa!|BtkM*?&kKulLM%e_s>naKM}wh5Qn8Kd@V4|6DWh zA&|;4q+ElepZ_CG0y+X*lf&n8j{-FS+W+~$o6N)h_4j{a|5xA|Y=?f1Z|B=0y#}DK zqJQ`wu!q+|`Wvvu&eLb~19)G2uAra)bDQPm<%c={qS`;k3-uLZ{^!UE;tA>d%ZqaO zxew^Sv46e(58HbI+J7a0v33XGT7Y)PwK=Z=2Ur!-QGhv*>kcB!C1AJ4{%a%u39tn1=ZQTW%~L0IVRQ;Wy}*9*-Bb| z7=RrZ``7E_uuc2_8sNMD?Y}+1=Y_Pj8UXu$fA-H9!k_)ugbjZTYy~`kul;|AeboTk z|6_pZ|EnX%m51@adP1glp0{q!OWB(t4%0Ribf7q+G|Lrp`EzJDC ze(s04gCD5j2j(``b(J4&mpk!g0Jb3Z?@LM<-o~80j@P$NKfek3@uTd|F$a*J=YRSb z#0pa;1THF5f{O*T$p0K{c{|xwq z=i~nbNZ(H)%K1<1zkJz0)%C#Kre^*BulDo)yw?Al_x;u+O{sbw>oDI8W?p{fmjSW= z@@M~8`=i(Ep)c>gH>5t}|J>|f&-vkb|4IB$uzyk0cg(&_;{Nh$@>%Xo?7#fkKkQiB zFaBrvJ&v(|PICY!_>M6CN6@!a4CE#5&tf6vdqeEM{Mo+2mNHZ)%tf`TW6jx&-)C46(AM+FQ4}R_mbPR9phQv5ZnHJ=K!&-pY<5iUFw5*Z_ue5 zk}v-Zi2avO``?QH`=wTQ(ET~=Jk;?V(Aa*`))^OELk-_C+xxx1@~`c3hrSFr?BA^8 zmG!^AB$d%^%>V0oK&%P6&wdwBkN4pZ;0PF#1?>7T{ z|A*-}%;Wu^7OdBP|Bv4YvW0k( zYDhJ`_5^mzcYbNdr|gvZf*>8o6SV)^Z2PCrDQp|f{h056R0#Vw+Ku(RTHZ%OZ*buq zK^+sAGD#~g1F$Fd|8D@@fv!MjpbOB=&U2sH&wVW8G0gv9gXO(|=XQsB*N(w%ZvprX zarXU0XP}!;p5ORj8TYfC-xPP|x&1BdzZNBg{=XWo(GlSFob5AtrlzgDAGVckWS%Lv zi9WzRtP`oW3+Bu7Z^x`lVD~lUJ6`2k$Z~JOfG{8o6p#V*ft(WyECyusFX*@V|H1gZ zkK#au=%WiLHaT1vs1OXmhiLmp$~nVdfT_SVV4_c+`=$Vsfa$4j;L zNj{L4>-qn(DsySKFkl#fjhgq zgU9Ocek){Gze{>yT$`Ul*YL>-C99Y5)5Bzew5t)BdXf zJXY-A5G<)MP>LCVt@^Wn7xvTs?*rKX=hyx#+sBIimtvVq(}V%T0PI!l-;g1xFi^o5 z5c{uS0+-tr1{@5C{X58ztT0f)7!dofU;>xh6$Ts(i2XarkgPCJ!59$xuV4a~+Z6^J z42b$XEm)jKv91Mv4JIIi%Fi^o5@MHh|u#fBbx&E&m!1sT+{*Ukf zybA0mMM!@GHrn|W$o~ZJ{U2HXTfuZlZnx|*0DE<=|Kt0A-EEsbi9FZ;UJd*XR0o~` zw%g@g|F6GKjC3(z-WxiGysZB%yDA|quRsjIR`oYJkX`}sef|`H-vD9%zYGYo^K+5% z+rm6%GO!ikHv#zV4}K$n=S>56p8ozPQhEQc0_l?6YT07|Hfs9+OOam*(2jQjQ2=dx zyPe;ObPEsz>;V#iSUVqwl>cvF8^Gf}AQMRC`#)t*C8Wg_kOA5%!2b)v?|_{LTm)3J zVe;o=o5!%6$C+_Eqr33N6cu_*Q6@={MhgSN zfG{8o2m``^Fdz&F1HynXAPfit!hkR!3)S5C((+ zVL%uV2801&pu#ecYIV%*-op{xR?hi=<2v@Olj8xMbh(vN`6+L)RV{}D4%_89RbW?d zS^is)Zcev6elOr{y*{X;zXMV;&i8+T@XT_*9Ay?;{_Y^Nc{uR4>trc+-n#cvS#L0VdDa{B5f7@!D#7gKStXc7J9Pu{D$i;osoq9A|8{iVpr7*F@+hz4 zEDv|q?9FE(s1W5@2=sG)76LnYuj%}oS9ul!{hXhLz}(9Hm1Q&pEM=E%Myc0MGdHv4 zG>PF>$7}`IQAeeE9o=D%@?U<++Z`RR=C>SmchWLEep^oEi*)%Z<+mIH`dyc&i&Zx4RlI^Su62gZannmwXn@Q&F78U~NTS#*iFnX{Eb`PH*WjL9r# zf8nsf?B%Ihyma<*`mBx_7w})6*?{cjsNc~KW-kv%d1eFsmuKo&_VP|RKT}7tm!mk( z@=P7^e|{zb*~-0LQU*aNhkP9Tr(3R0*{(k}Q$D9G*I@@gUik-P${*#*Yrk3snfd`e zse@fC&+V6Yc_z93>N(}p#nB0P_0xNF_VR`S!y8)L0|w+&4*k^kZ^|=;@7-hcx46ok z=EFLD>#yUzP{!Yamfx0frZH~bsJLYk|Pq9z?RW|WwtzTsmuTOcGJnHY` zTTcHo8~(@MpW<7dRX==7(Oc2~9d_7!_Ct!k2X>NcCGfAgmS^6d|HGu*$iHmzlb8JS z(jWhKEqjFlVL%uV2Fe@*O)*U9q{j*vE?9M$!_VbSmP!S00h zKL?~HMF2|zQWC=hcJ4}W=hV(|pIPpZZdQO3^LtqVp?xjSg5H)Z;z#)?T29=Z>HP`o zhPx6r{OU=Ln-{QU%i0`e%ytg0V|ix1XjKXR*nQU2|5;UcZ~TE@%+2*2bALfyEq8EN zD_{ZiVqqW4EjnSCIWsBj+fBRPzI#g+|GjbEd*{Zk=y84gx>1iOZW{M#{MzB;lA>o$ zN{$Vlm$-RCRKkWa$w|>ux241dZP^(5Npk3q7j2l^-C8iWhifWqemrD580m+=li~fX zJHz@~wbqWZ&h~egvwyHW^b>YLw-9Q|uQsNh=$lvZzL7gqwybHGN$tG8S zvc|r28%9T%W;qx7@F#R36tUv#(1k0^JibL-H>Y(hY2U&Chw!abm!%NEtTW9fJX8nK| zp{G|TxGg{b3?#)&)3T3V`a{#CsHv+{;?cJ!hS9eZxyv2p$&h_AVu7T%c`9t^mD=v- zz~3|0b7D>ou^;WY2%s*|FVH_|p8%PsKo4GBG0dvMJEjgGmduD{{IgIo_rlJ#?Bmvs zcrYnu_Azffr|ms!zi#V6GIYa$x{-?bZ%a~`+PQt5T0QN4H6Jm2*nmJ4+TW$Iu%AmU z=<8DRd%M)UUKufdR=p^i{GGaEVoRe3vCxBuDcMP1>exVHM2DJh+ZnHI0>1w(jNxGO zi)io1fy`JQdC2<|Y@Gc-+ScU7={vVZs=Yf`sRMh~se^mhs-uTf)V9q(sU?Fg6*(eM zMGp65!_r|MMJyTWQ4xbZYSB+_=m~VA4`RcfUi&w`q6ogg=z@tAv>qTn2*Eh95pSJw z0d%6m`+_aIVsw0uG2H;n<2V!=Ib-(>z{VkSwR`(YE$_nzHmjotqtlNZh&pv- z|E7~j(+=%jo4#^veaO{PONR$)IU9NVO?~uZDRg4-AnJo#&4)hBfe$GrUC4+L7$;&b z;a%#$&mUVZ=tPC}1smqSsqZ~8YI@V8m|4RUH&5x7xN%%(%+-C5*zenfjbpw~+%&N> 
diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/manifest_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/manifest_test.go
new file mode 100644
index 00000000..d7be25b1
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/test/functional/manifest_test.go
@@ -0,0 +1,3 @@
+package functional
+
+import _ "github.com/Microsoft/hcsshim/test/functional/manifest"
diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/test.go
new file mode 100644
index 00000000..0f3507cc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/test/functional/test.go
@@ -0,0 +1,47 @@
+package functional
+
+import (
+	"os"
+	"os/exec"
+	"strconv"
+	"time"
+
+	"github.com/Microsoft/hcsshim/internal/hcs"
+	"github.com/Microsoft/hcsshim/internal/hcsoci"
+	"github.com/sirupsen/logrus"
+)
+
+var pauseDurationOnCreateContainerFailure time.Duration
+
+func init() {
+	if len(os.Getenv("HCSSHIM_FUNCTIONAL_TESTS_DEBUG")) > 0 {
+		logrus.SetLevel(logrus.DebugLevel)
+		logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})
+	}
+
+	// This allows for debugging a utility VM.
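+	// When HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES is set to an
+	// integer, CreateContainerTestWrapper below keeps resources alive and sleeps for that
+	// many minutes after a CreateContainer failure so the utility VM can be inspected.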
+	s := os.Getenv("HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES")
+	if s != "" {
+		if t, err := strconv.Atoi(s); err == nil {
+			pauseDurationOnCreateContainerFailure = time.Duration(t) * time.Minute
+		}
+	}
+
+	// Try to stop any pre-existing compute processes
+	cmd := exec.Command("powershell", `get-computeprocess | stop-computeprocess -force`)
+	cmd.Run()
+
+}
+
+func CreateContainerTestWrapper(options *hcsoci.CreateOptions) (*hcs.System, *hcsoci.Resources, error) {
+	if pauseDurationOnCreateContainerFailure != 0 {
+		options.DoNotReleaseResourcesOnFailure = true
+	}
+	s, r, err := hcsoci.CreateContainer(options)
+	if err != nil {
+		logrus.Warnf("Test is pausing for %s for debugging CreateContainer failure", pauseDurationOnCreateContainerFailure)
+		time.Sleep(pauseDurationOnCreateContainerFailure)
+		hcsoci.ReleaseResources(r, options.HostingSystem, true)
+	}
+	return s, r, err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/createuvm.go b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/createuvm.go
new file mode 100644
index 00000000..1f45bc07
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/createuvm.go
@@ -0,0 +1,76 @@
+package testutilities
+
+import (
+	"os"
+	"testing"
+
+	"github.com/Microsoft/hcsshim/internal/uvm"
+)
+
+// CreateWCOWUVM creates a WCOW utility VM with all default options. Returns the
+// UtilityVM object, the image layer folders, and the folder used as its scratch.
+func CreateWCOWUVM(t *testing.T, id, image string) (*uvm.UtilityVM, []string, string) {
+	return CreateWCOWUVMFromOptsWithImage(t, uvm.NewDefaultOptionsWCOW(id, ""), image)
+
+}
+
+// CreateWCOWUVMFromOpts creates a WCOW utility VM with the passed opts.
+func CreateWCOWUVMFromOpts(t *testing.T, opts *uvm.OptionsWCOW) *uvm.UtilityVM {
+	if opts == nil || len(opts.LayerFolders) < 2 {
+		t.Fatalf("opts must be set with LayerFolders")
+	}
+
+	uvm, err := uvm.CreateWCOW(opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := uvm.Start(); err != nil {
+		uvm.Close()
+		t.Fatal(err)
+	}
+	return uvm
+}
+
+// CreateWCOWUVMFromOptsWithImage creates a WCOW utility VM with the passed opts
+// and builds the LayerFolders based on `image`. Returns the UtilityVM object,
+// the image layer folders, and the folder used as its scratch.
+func CreateWCOWUVMFromOptsWithImage(t *testing.T, opts *uvm.OptionsWCOW, image string) (*uvm.UtilityVM, []string, string) {
+	if opts == nil {
+		t.Fatal("opts must be set")
+	}
+
+	uvmLayers := LayerFolders(t, image)
+	scratchDir := CreateTempDir(t)
+	defer func() {
+		if t.Failed() {
+			os.RemoveAll(scratchDir)
+		}
+	}()
+
+	opts.LayerFolders = append(opts.LayerFolders, uvmLayers...)
+	opts.LayerFolders = append(opts.LayerFolders, scratchDir)
+
+	return CreateWCOWUVMFromOpts(t, opts), uvmLayers, scratchDir
+}
+
+// CreateLCOWUVM creates an LCOW utility VM with all default options.
+func CreateLCOWUVM(t *testing.T, id string) *uvm.UtilityVM {
+	return CreateLCOWUVMFromOpts(t, uvm.NewDefaultOptionsLCOW(id, ""))
+}
+
+// CreateLCOWUVMFromOpts creates an LCOW utility VM with the specified options.
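+// The VM is started before returning; the caller is responsible for closing it.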
+func CreateLCOWUVMFromOpts(t *testing.T, opts *uvm.OptionsLCOW) *uvm.UtilityVM { + if opts == nil { + t.Fatal("opts must be set") + } + + uvm, err := uvm.CreateLCOW(opts) + if err != nil { + t.Fatal(err) + } + if err := uvm.Start(); err != nil { + uvm.Close() + t.Fatal(err) + } + return uvm +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultlinuxspec.go b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultlinuxspec.go new file mode 100644 index 00000000..2f0b0197 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultlinuxspec.go @@ -0,0 +1,21 @@ +package testutilities + +import ( + "encoding/json" + "io/ioutil" + "testing" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func GetDefaultLinuxSpec(t *testing.T) *specs.Spec { + content, err := ioutil.ReadFile(`assets\defaultlinuxspec.json`) + if err != nil { + t.Fatalf("failed to read defaultlinuxspec.json: %s", err.Error()) + } + spec := specs.Spec{} + if err := json.Unmarshal(content, &spec); err != nil { + t.Fatalf("failed to unmarshal contents of defaultlinuxspec.json: %s", err.Error()) + } + return &spec +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultwindowsspec.go b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultwindowsspec.go new file mode 100644 index 00000000..e1fa541a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/defaultwindowsspec.go @@ -0,0 +1,21 @@ +package testutilities + +import ( + "encoding/json" + "io/ioutil" + "testing" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func GetDefaultWindowsSpec(t *testing.T) *specs.Spec { + content, err := ioutil.ReadFile(`assets\defaultwindowsspec.json`) + if err != nil { + t.Fatalf("failed to read defaultwindowsspec.json: %s", err.Error()) + } + spec := specs.Spec{} + if err := json.Unmarshal(content, &spec); err != nil { + t.Fatalf("failed to unmarshal contents of defaultwindowsspec.json: %s", err.Error()) + } + return &spec +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/layerfolders.go b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/layerfolders.go new file mode 100644 index 00000000..6fc23dd0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/layerfolders.go @@ -0,0 +1,54 @@ +package testutilities + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +var imageLayers map[string][]string + +func init() { + imageLayers = make(map[string][]string) +} + +func LayerFolders(t *testing.T, imageName string) []string { + if _, ok := imageLayers[imageName]; !ok { + imageLayers[imageName] = getLayers(t, imageName) + } + return imageLayers[imageName] +} + +func getLayers(t *testing.T, imageName string) []string { + cmd := exec.Command("docker", "inspect", imageName, "-f", `"{{.GraphDriver.Data.dir}}"`) + var out bytes.Buffer + cmd.Stdout = &out + if err := cmd.Run(); err != nil { + t.Skipf("Failed to find layers for %q. Check docker images", imageName) + } + imagePath := strings.Replace(strings.TrimSpace(out.String()), `"`, ``, -1) + layers := getLayerChain(t, imagePath) + return append([]string{imagePath}, layers...) 
+} + +func getLayerChain(t *testing.T, layerFolder string) []string { + jPath := filepath.Join(layerFolder, "layerchain.json") + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + t.Fatalf("layerchain not found") + } else if err != nil { + t.Fatalf("failed to read layerchain") + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + t.Fatalf("failed to unmarshal layerchain") + } + return layerChain +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/requiresbuild.go b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/requiresbuild.go new file mode 100644 index 00000000..fb0d7939 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/requiresbuild.go @@ -0,0 +1,19 @@ +package testutilities + +import ( + "testing" + + "github.com/Microsoft/hcsshim/osversion" +) + +func RequiresBuild(t *testing.T, b uint16) { + if osversion.Get().Build < b { + t.Skipf("Requires build %d+", b) + } +} + +func RequiresExactBuild(t *testing.T, b uint16) { + if osversion.Get().Build != b { + t.Skipf("Requires exact build %d", b) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/scratch.go b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/scratch.go new file mode 100644 index 00000000..ab0d8589 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/scratch.go @@ -0,0 +1,59 @@ +package testutilities + +import ( + "path/filepath" + "testing" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/lcow" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/wclayer" +) + +const lcowGlobalSVMID = "test.lcowglobalsvm" + +var ( + lcowGlobalSVM *uvm.UtilityVM + lcowCacheScratchFile string +) + +func init() { + if hcsSystem, err := hcs.OpenComputeSystem(lcowGlobalSVMID); err == nil { + hcsSystem.Terminate() + } +} + +// CreateWCOWBlankRWLayer uses HCS to create a temp test directory containing a +// read-write layer containing a disk that can be used as a containers scratch +// space. The VHD is created with VM group access +// TODO: This is wrong. Need to search the folders. +func CreateWCOWBlankRWLayer(t *testing.T, imageLayers []string) string { + + // uvmFolder, err := LocateUVMFolder(imageLayers) + // if err != nil { + // t.Fatalf("failed to locate UVM folder from %+v: %s", imageLayers, err) + // } + + tempDir := CreateTempDir(t) + if err := wclayer.CreateScratchLayer(tempDir, imageLayers); err != nil { + t.Fatalf("Failed CreateScratchLayer: %s", err) + } + return tempDir +} + +// CreateLCOWBlankRWLayer uses an LCOW utility VM to create a blank +// VHDX and format it ext4. If vmID is supplied, it grants access to the +// destination file. This can then be used as a scratch space for a container, +// or for a "service VM". 
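+// A single global service VM (lcowGlobalSVM) and its scratch cache file are created
+// lazily on first use and reused across calls.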
+func CreateLCOWBlankRWLayer(t *testing.T, vmID string) string { + if lcowGlobalSVM == nil { + lcowGlobalSVM = CreateLCOWUVM(t, lcowGlobalSVMID) + lcowCacheScratchFile = filepath.Join(CreateTempDir(t), "sandbox.vhdx") + } + tempDir := CreateTempDir(t) + + if err := lcow.CreateScratch(lcowGlobalSVM, filepath.Join(tempDir, "sandbox.vhdx"), lcow.DefaultScratchSizeGB, lcowCacheScratchFile, vmID); err != nil { + t.Fatalf("failed to create EXT4 scratch for LCOW test cases: %s", err) + } + return tempDir +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/tempdir.go b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/tempdir.go new file mode 100644 index 00000000..ebbe6ae5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/utilities/tempdir.go @@ -0,0 +1,15 @@ +package testutilities + +import ( + "io/ioutil" + "testing" +) + +// CreateTempDir creates a temporary directory +func CreateTempDir(t *testing.T) string { + tempDir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatalf("failed to create temporary directory: %s", err) + } + return tempDir +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_mem_backingtype_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_mem_backingtype_test.go new file mode 100644 index 00000000..70b1b6f7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_mem_backingtype_test.go @@ -0,0 +1,107 @@ +// +build functional uvmmem + +package functional + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + testutilities "github.com/Microsoft/hcsshim/test/functional/utilities" + "github.com/sirupsen/logrus" +) + +func runMemStartLCOWTest(t *testing.T, opts *uvm.OptionsLCOW) { + u := testutilities.CreateLCOWUVMFromOpts(t, opts) + u.Close() +} + +func runMemStartWCOWTest(t *testing.T, opts *uvm.OptionsWCOW) { + u, _, scratchDir := testutilities.CreateWCOWUVMFromOptsWithImage(t, opts, "microsoft/nanoserver") + defer os.RemoveAll(scratchDir) + u.Close() +} + +func runMemTests(t *testing.T, os string) { + type testCase struct { + allowOvercommit bool + enableDeferredCommit bool + } + + testCases := []testCase{ + {allowOvercommit: true, enableDeferredCommit: false}, // Explicit default - Virtual + {allowOvercommit: true, enableDeferredCommit: true}, // Virtual Deferred + {allowOvercommit: false, enableDeferredCommit: false}, // Physical + } + + for _, bt := range testCases { + if os == "windows" { + wopts := uvm.NewDefaultOptionsWCOW(t.Name(), "") + wopts.MemorySizeInMB = 512 + wopts.AllowOvercommit = bt.allowOvercommit + wopts.EnableDeferredCommit = bt.enableDeferredCommit + runMemStartWCOWTest(t, wopts) + } else { + lopts := uvm.NewDefaultOptionsLCOW(t.Name(), "") + lopts.MemorySizeInMB = 512 + lopts.AllowOvercommit = bt.allowOvercommit + lopts.EnableDeferredCommit = bt.enableDeferredCommit + runMemStartLCOWTest(t, lopts) + } + } +} + +func TestMemBackingTypeWCOW(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + runMemTests(t, "windows") +} + +func TestMemBackingTypeLCOW(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + runMemTests(t, "linux") +} + +func runBenchMemStartTest(b *testing.B, opts *uvm.OptionsLCOW) { + // Cant use testutilities here because its `testing.B` not `testing.T` + u, err := uvm.CreateLCOW(opts) + if err != nil { + b.Fatal(err) + } + defer u.Close() + if err := u.Start(); err != nil { + b.Fatal(err) + } +} + +func 
runBenchMemStartLcowTest(b *testing.B, allowOvercommit bool, enableDeferredCommit bool) { + for i := 0; i < b.N; i++ { + opts := uvm.NewDefaultOptionsLCOW(b.Name(), "") + opts.MemorySizeInMB = 512 + opts.AllowOvercommit = allowOvercommit + opts.EnableDeferredCommit = enableDeferredCommit + runBenchMemStartTest(b, opts) + } +} + +func BenchmarkMemBackingTypeVirtualLCOW(b *testing.B) { + //testutilities.RequiresBuild(t, osversion.RS5) + logrus.SetOutput(ioutil.Discard) + + runBenchMemStartLcowTest(b, true, false) +} + +func BenchmarkMemBackingTypeVirtualDeferredLCOW(b *testing.B) { + //testutilities.RequiresBuild(t, osversion.RS5) + logrus.SetOutput(ioutil.Discard) + + runBenchMemStartLcowTest(b, true, true) +} + +func BenchmarkMemBackingTypePhyscialLCOW(b *testing.B) { + //testutilities.RequiresBuild(t, osversion.RS5) + logrus.SetOutput(ioutil.Discard) + + runBenchMemStartLcowTest(b, false, false) +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_plannine_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_plannine_test.go new file mode 100644 index 00000000..7e65c4b6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_plannine_test.go @@ -0,0 +1,40 @@ +// +build functional uvmp9 + +// This file isn't called uvm_plan9_test.go as go test skips when a number is in it... go figure (pun intended) + +package functional + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" +) + +// TestPlan9 tests adding/removing Plan9 shares to/from a v2 Linux utility VM +// TODO: This is very basic. Need multiple shares and so-on. Can be iterated on later. +func TestPlan9(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + + uvm := testutilities.CreateLCOWUVM(t, t.Name()) + defer uvm.Close() + + dir := testutilities.CreateTempDir(t) + defer os.RemoveAll(dir) + var iterations uint32 = 64 + for i := 0; i < int(iterations); i++ { + if err := uvm.AddPlan9(dir, fmt.Sprintf("/tmp/%s", filepath.Base(dir)), false); err != nil { + t.Fatalf("AddPlan9 failed: %s", err) + } + } + + // Remove them all + for i := 0; i < int(iterations); i++ { + if err := uvm.RemovePlan9(dir); err != nil { + t.Fatalf("RemovePlan9 failed: %s", err) + } + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_properties_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_properties_test.go new file mode 100644 index 00000000..15540c69 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_properties_test.go @@ -0,0 +1,48 @@ +// +build functional uvmproperties + +package functional + +import ( + "os" + "testing" + + "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" +) + +func TestPropertiesGuestConnection_LCOW(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + + uvm := testutilities.CreateLCOWUVM(t, t.Name()) + defer uvm.Close() + + p, err := uvm.ComputeSystem().Properties(schema1.PropertyTypeGuestConnection) + if err != nil { + t.Fatalf("Failed to query properties: %s", err) + } + + if p.GuestConnectionInfo.GuestDefinedCapabilities.NamespaceAddRequestSupported || + !p.GuestConnectionInfo.GuestDefinedCapabilities.SignalProcessSupported || + p.GuestConnectionInfo.ProtocolVersion < 4 { + t.Fatalf("unexpected values: %+v", p.GuestConnectionInfo) + } +} + +func TestPropertiesGuestConnection_WCOW(t 
*testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + uvm, _, uvmScratchDir := testutilities.CreateWCOWUVM(t, t.Name(), "microsoft/nanoserver") + defer os.RemoveAll(uvmScratchDir) + defer uvm.Close() + + p, err := uvm.ComputeSystem().Properties(schema1.PropertyTypeGuestConnection) + if err != nil { + t.Fatalf("Failed to query properties: %s", err) + } + + if !p.GuestConnectionInfo.GuestDefinedCapabilities.NamespaceAddRequestSupported || + !p.GuestConnectionInfo.GuestDefinedCapabilities.SignalProcessSupported || + p.GuestConnectionInfo.ProtocolVersion < 4 { + t.Fatalf("unexpected values: %+v", p.GuestConnectionInfo) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scratch_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scratch_test.go new file mode 100644 index 00000000..67106824 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scratch_test.go @@ -0,0 +1,114 @@ +// +build functional uvmscratch + +package functional + +import ( + "os" + "path/filepath" + "testing" + + "github.com/Microsoft/hcsshim/internal/lcow" + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" +) + +func TestScratchCreateLCOW(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + tempDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(tempDir) + + firstUVM := testutilities.CreateLCOWUVM(t, "TestCreateLCOWScratch") + defer firstUVM.Close() + + cacheFile := filepath.Join(tempDir, "cache.vhdx") + destOne := filepath.Join(tempDir, "destone.vhdx") + destTwo := filepath.Join(tempDir, "desttwo.vhdx") + + if err := lcow.CreateScratch(firstUVM, destOne, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil { + t.Fatal(err) + } + if _, err := os.Stat(destOne); err != nil { + t.Fatalf("destone wasn't created!") + } + if _, err := os.Stat(cacheFile); err != nil { + t.Fatalf("cacheFile wasn't created!") + } + + targetUVM := testutilities.CreateLCOWUVM(t, "TestCreateLCOWScratch_target") + defer targetUVM.Close() + + // A non-cached create + if err := lcow.CreateScratch(firstUVM, destTwo, lcow.DefaultScratchSizeGB, cacheFile, targetUVM.ID()); err != nil { + t.Fatal(err) + } + + // Make sure it can be added (verifies it has access correctly) + c, l, err := targetUVM.AddSCSI(destTwo, "", false) + if err != nil { + t.Fatal(err) + } + if c != 0 && l != 0 { + t.Fatal(err) + } + // TODO Could consider giving it a host path and verifying it's contents somehow +} + +// TODO This is old test which should go here. +//// createLCOWTempDirWithSandbox uses an LCOW utility VM to create a blank +//// VHDX and format it ext4. +//func TestCreateLCOWScratch(t *testing.T) { +// t.Skip("for now") +// cacheDir := createTempDir(t) +// cacheFile := filepath.Join(cacheDir, "cache.vhdx") +// uvm, err := CreateContainer(&CreateOptions{Spec: getDefaultLinuxSpec(t)}) +// if err != nil { +// t.Fatalf("Failed create: %s", err) +// } +// defer uvm.Terminate() +// if err := uvm.Start(); err != nil { +// t.Fatalf("Failed to start service container: %s", err) +// } + +// // 1: Default size, cache doesn't exist, but no UVM passed. 
Cannot be created +// err = CreateLCOWScratch(nil, filepath.Join(cacheDir, "default.vhdx"), lcow.DefaultScratchSizeGB, cacheFile) +// if err == nil { +// t.Fatalf("expected an error creating LCOW scratch") +// } +// if err.Error() != "cannot create scratch disk as cache is not present and no utility VM supplied" { +// t.Fatalf("Not expecting error %s", err) +// } + +// // 2: Default size, no cache supplied and no UVM +// err = CreateLCOWScratch(nil, filepath.Join(cacheDir, "default.vhdx"), lcow.DefaultScratchSizeGB, "") +// if err == nil { +// t.Fatalf("expected an error creating LCOW scratch") +// } +// if err.Error() != "cannot create scratch disk as cache is not present and no utility VM supplied" { +// t.Fatalf("Not expecting error %s", err) +// } + +// // 3: Default size. This should work and the cache should be created. +// err = CreateLCOWScratch(uvm, filepath.Join(cacheDir, "default.vhdx"), lcow.DefaultScratchSizeGB, cacheFile) +// if err != nil { +// t.Fatalf("should succeed creating default size cache file: %s", err) +// } +// if _, err = os.Stat(cacheFile); err != nil { +// t.Fatalf("failed to stat cache file after created: %s", err) +// } +// if _, err = os.Stat(filepath.Join(cacheDir, "default.vhdx")); err != nil { +// t.Fatalf("failed to stat default.vhdx after created: %s", err) +// } + +// // 4: Non-defaultsize. This should work and the cache should be created. +// err = CreateLCOWScratch(uvm, filepath.Join(cacheDir, "nondefault.vhdx"), lcow.DefaultScratchSizeGB+1, cacheFile) +// if err != nil { +// t.Fatalf("should succeed creating default size cache file: %s", err) +// } +// if _, err = os.Stat(cacheFile); err != nil { +// t.Fatalf("failed to stat cache file after created: %s", err) +// } +// if _, err = os.Stat(filepath.Join(cacheDir, "nondefault.vhdx")); err != nil { +// t.Fatalf("failed to stat default.vhdx after created: %s", err) +// } + +//} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scsi_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scsi_test.go new file mode 100644 index 00000000..38b53b74 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_scsi_test.go @@ -0,0 +1,119 @@ +// +build functional uvmscsi + +package functional + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" + "github.com/sirupsen/logrus" +) + +// TestSCSIAddRemovev2LCOW validates adding and removing SCSI disks +// from a utility VM in both attach-only and with a container path. Also does +// negative testing so that a disk can't be attached twice. +func TestSCSIAddRemoveLCOW(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + u := testutilities.CreateLCOWUVM(t, t.Name()) + defer u.Close() + + testSCSIAddRemove(t, u, `/`, "linux", []string{}) + +} + +// TestSCSIAddRemoveWCOW validates adding and removing SCSI disks +// from a utility VM in both attach-only and with a container path. Also does +// negative testing so that a disk can't be attached twice. 
+func TestSCSIAddRemoveWCOW(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + u, layers, uvmScratchDir := testutilities.CreateWCOWUVM(t, t.Name(), "microsoft/nanoserver") + defer os.RemoveAll(uvmScratchDir) + defer u.Close() + + testSCSIAddRemove(t, u, `c:\`, "windows", layers) +} + +func testSCSIAddRemove(t *testing.T, u *uvm.UtilityVM, pathPrefix string, operatingSystem string, wcowImageLayerFolders []string) { + numDisks := 63 // Windows: 63 as the UVM scratch is at 0:0 + if operatingSystem == "linux" { + numDisks++ // + } + + // Create a bunch of directories each containing sandbox.vhdx + disks := make([]string, numDisks) + for i := 0; i < numDisks; i++ { + tempDir := "" + if operatingSystem == "windows" { + tempDir = testutilities.CreateWCOWBlankRWLayer(t, wcowImageLayerFolders) + } else { + tempDir = testutilities.CreateLCOWBlankRWLayer(t, u.ID()) + } + defer os.RemoveAll(tempDir) + disks[i] = filepath.Join(tempDir, `sandbox.vhdx`) + } + + // Add each of the disks to the utility VM. Attach-only, no container path + logrus.Debugln("First - adding in attach-only") + for i := 0; i < numDisks; i++ { + _, _, err := u.AddSCSI(disks[i], "", false) + if err != nil { + t.Fatalf("failed to add scsi disk %d %s: %s", i, disks[i], err) + } + } + + // Try to re-add. These should all fail. + logrus.Debugln("Next - trying to re-add") + for i := 0; i < numDisks; i++ { + _, _, err := u.AddSCSI(disks[i], "", false) + if err == nil { + t.Fatalf("should not be able to re-add the same SCSI disk!") + } + if err != uvm.ErrAlreadyAttached { + t.Fatalf("expecting %s, got %s", uvm.ErrAlreadyAttached, err) + } + } + + // Remove them all + logrus.Debugln("Removing them all") + for i := 0; i < numDisks; i++ { + if err := u.RemoveSCSI(disks[i]); err != nil { + t.Fatalf("expected success: %s", err) + } + } + + // Now re-add but providing a container path + logrus.Debugln("Next - re-adding with a container path") + for i := 0; i < numDisks; i++ { + _, _, err := u.AddSCSI(disks[i], fmt.Sprintf(`%s%d`, pathPrefix, i), false) + if err != nil { + t.Fatalf("failed to add scsi disk %d %s: %s", i, disks[i], err) + } + } + + // Try to re-add. These should all fail. + logrus.Debugln("Next - trying to re-add") + for i := 0; i < numDisks; i++ { + _, _, err := u.AddSCSI(disks[i], fmt.Sprintf(`%s%d`, pathPrefix, i), false) + if err == nil { + t.Fatalf("should not be able to re-add the same SCSI disk!") + } + if err != uvm.ErrAlreadyAttached { + t.Fatalf("expecting %s, got %s", uvm.ErrAlreadyAttached, err) + } + } + + // Remove them all + logrus.Debugln("Next - Removing them") + for i := 0; i < numDisks; i++ { + if err := u.RemoveSCSI(disks[i]); err != nil { + t.Fatalf("expected success: %s", err) + } + } + + // TODO: Could extend to validate can't add a 64th disk (windows). 65th (linux). 
+} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vpmem_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vpmem_test.go new file mode 100644 index 00000000..eab0b06a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vpmem_test.go @@ -0,0 +1,48 @@ +// +build functional uvmvpmem + +package functional + +import ( + "os" + "path/filepath" + "testing" + + "github.com/Microsoft/hcsshim/internal/copyfile" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" + "github.com/sirupsen/logrus" +) + +// TestVPMEM tests adding/removing VPMem Read-Only layers from a v2 Linux utility VM +func TestVPMEM(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + alpineLayers := testutilities.LayerFolders(t, "alpine") + + u := testutilities.CreateLCOWUVM(t, t.Name()) + defer u.Close() + + var iterations uint32 = uvm.MaxVPMEMCount + + // Use layer.vhd from the alpine image as something to add + tempDir := testutilities.CreateTempDir(t) + if err := copyfile.CopyFile(filepath.Join(alpineLayers[0], "layer.vhd"), filepath.Join(tempDir, "layer.vhd"), true); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + for i := 0; i < int(iterations); i++ { + deviceNumber, uvmPath, err := u.AddVPMEM(filepath.Join(tempDir, "layer.vhd"), true) + if err != nil { + t.Fatalf("AddVPMEM failed: %s", err) + } + logrus.Debugf("exposed as %s on %d", uvmPath, deviceNumber) + } + + // Remove them all + for i := 0; i < int(iterations); i++ { + if err := u.RemoveVPMEM(filepath.Join(tempDir, "layer.vhd")); err != nil { + t.Fatalf("RemoveVPMEM failed: %s", err) + } + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vsmb_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vsmb_test.go new file mode 100644 index 00000000..4aeb0d3f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/uvm_vsmb_test.go @@ -0,0 +1,45 @@ +// +build functional uvmvsmb + +package functional + +import ( + "os" + "testing" + + "github.com/Microsoft/hcsshim/internal/schema2" + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" +) + +// TestVSMB tests adding/removing VSMB layers from a v2 Windows utility VM +func TestVSMB(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + uvm, _, uvmScratchDir := testutilities.CreateWCOWUVM(t, t.Name(), "microsoft/nanoserver") + defer os.RemoveAll(uvmScratchDir) + defer uvm.Close() + + dir := testutilities.CreateTempDir(t) + defer os.RemoveAll(dir) + var iterations uint32 = 64 + options := &hcsschema.VirtualSmbShareOptions{ + ReadOnly: true, + PseudoOplocks: true, + TakeBackupPrivilege: true, + CacheIo: true, + ShareRead: true, + } + for i := 0; i < int(iterations); i++ { + if err := uvm.AddVSMB(dir, "", options); err != nil { + t.Fatalf("AddVSMB failed: %s", err) + } + } + + // Remove them all + for i := 0; i < int(iterations); i++ { + if err := uvm.RemoveVSMB(dir); err != nil { + t.Fatalf("RemoveVSMB failed: %s", err) + } + } +} + +// TODO: VSMB for mapped directories diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/wcow_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/wcow_test.go new file mode 100644 index 00000000..0b9f1040 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/wcow_test.go @@ -0,0 +1,731 @@ +// +build functional wcow + +package functional + +import ( + "bytes" + "os" + 
"path/filepath" + "strings" + "testing" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcsoci" + "github.com/Microsoft/hcsshim/internal/schema1" + "github.com/Microsoft/hcsshim/internal/schemaversion" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvmfolder" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/Microsoft/hcsshim/internal/wcow" + "github.com/Microsoft/hcsshim/osversion" + "github.com/Microsoft/hcsshim/test/functional/utilities" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Has testing for Windows containers using both the older hcsshim methods, +// and the newer hcsoci methods. Does the same thing in six different ways: +// - hcsshim/argon +// - hcsshim/xenon +// - hcsoci/argon v1 +// - hcsoci/xenon v1 +// - hcsoci/argon v2 +// - hcsoci/xenon v2 +// +// Sample v1 HCS document for Xenon (no networking): +// +//{ +// "SystemType": "Container", +// "Name": "48347b95d0ad4f37de6d1979b986fb65912f973ad4549fbe716e848679dfa25c", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\layers\\48347b95d0ad4f37de6d1979b986fb65912f973ad4549fbe716e848679dfa25c", +// "Layers": [ +// { +// "ID": "7095521e-b79e-50fc-bafb-958d85400362", +// "Path": "C:\\layers\\f9b22d909166dd54b870eb699d54f4cf36d99f035ffd7701aff1267230aefd1e" +// } +// ], +// "HvPartition": true, +// "HvRuntime": { +// "ImagePath": "C:\\layers\\f9b22d909166dd54b870eb699d54f4cf36d99f035ffd7701aff1267230aefd1e\\UtilityVM" +// } +//} +// +// Sample v1 HCS document for Argon (no networking): +// +//{ +// "SystemType": "Container", +// "Name": "0a8bb9ec8366aa48a8e5f810274701d8d4452989bf268fc338570bfdecddf8df", +// "VolumePath": "\\\\?\\Volume{85da95c9-dda9-42e0-a066-40bd120c6f3c}", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\layers\\0a8bb9ec8366aa48a8e5f810274701d8d4452989bf268fc338570bfdecddf8df", +// "Layers": [ +// { +// "ID": "7095521e-b79e-50fc-bafb-958d85400362", +// "Path": "C:\\layers\\f9b22d909166dd54b870eb699d54f4cf36d99f035ffd7701aff1267230aefd1e" +// } +// ], +// "HvPartition": false +//} +// +// Sample v2 HCS document for Argon (no networking): +// +//{ +// "Owner": "sample", +// "SchemaVersion": { +// "Major": 2, +// "Minor": 0 +// }, +// "Container": { +// "Storage": { +// "Layers": [ +// { +// "Id": "6ba9cac1-7086-5ee9-a197-c465d3f50ad7", +// "Path": "C:\\layers\\f30368666ce4457e86fe12867506e508071d89e7eae615fc389c64f2e37ce54e" +// }, +// { +// "Id": "300b3ac0-b603-5367-9494-afec045dd369", +// "Path": "C:\\layers\\7a6ad2b849a9d29e6648d9950c1975b0f614a63b5fe2803009ce131745abcc62" +// }, +// { +// "Id": "fa3057d9-0d4b-54c0-b2d5-34b7afc78f91", +// "Path": "C:\\layers\\5d1332fe416f7932c344ce9c536402a6fc6d0bfcdf7a74f67cc67b8cfc66ab41" +// }, +// { +// "Id": "23284a2c-cdda-582a-a175-a196211b03cb", +// "Path": "C:\\layers\\b95977ad18f8fa04e517daa2e814f73d69bfff55c3ea68d56f2b0b8ae23a235d" +// }, +// { +// "Id": "e0233918-d93f-5b08-839e-0cbeda79b68b", +// "Path": "C:\\layers\\b2a444ff0e984ef282d6a8e24fa0108e76b6807d943e111a0e878c1c53ed8246" +// }, +// { +// "Id": "02740e08-d1d3-5715-9c08-c255eab4ca01", +// "Path": "C:\\layers\\de6b1a908240cca2aef34f49994e7d4e25a8e157a2cef3b6d6cf2d8e6400bfc2" +// } +// ], +// "Path": "\\\\?\\Volume{baac0fd5-16b7-405b-9621-112aa8e3d973}\\" +// } +// }, +// "ShouldTerminateOnLastHandleClosed": true +//} +// +// +// Sample v2 HCS document for Xenon (no networking) +// +//{ +// "Owner": "functional.test.exe", +// 
"SchemaVersion": { +// "Major": 2, +// "Minor": 0 +// }, +// "HostingSystemId": "xenonOci2UVM", +// "HostedSystem": { +// "SchemaVersion": { +// "Major": 2, +// "Minor": 0 +// }, +// "Container": { +// "Storage": { +// "Layers": [ +// { +// "Id": "6ba9cac1-7086-5ee9-a197-c465d3f50ad7", +// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s1" +// }, +// { +// "Id": "300b3ac0-b603-5367-9494-afec045dd369", +// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s2" +// }, +// { +// "Id": "fa3057d9-0d4b-54c0-b2d5-34b7afc78f91", +// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s3" +// }, +// { +// "Id": "23284a2c-cdda-582a-a175-a196211b03cb", +// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\4" +// }, +// { +// "Id": "e0233918-d93f-5b08-839e-0cbeda79b68b", +// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s5" +// }, +// { +// "Id": "02740e08-d1d3-5715-9c08-c255eab4ca01", +// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s6" +// } +// ], +// "Path": "C:\\c\\1\\scratch" +// }, +// "MappedDirectories": [ +// { +// "HostPath": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s7", +// "ContainerPath": "c:\\mappedro", +// "ReadOnly": true +// }, +// { +// "HostPath": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s8", +// "ContainerPath": "c:\\mappedrw" +// } +// ] +// } +// }, +// "ShouldTerminateOnLastHandleClosed": true +//} + +// Helper to start a container. +// Ones created through hcsoci methods will be of type *hcs.System. +// Ones created through hcsshim methods will be of type hcsshim.Container +func startContainer(t *testing.T, c interface{}) { + var err error + switch c.(type) { + case *hcs.System: + err = c.(*hcs.System).Start() + case hcsshim.Container: + err = c.(hcsshim.Container).Start() + default: + t.Fatal("unknown type") + } + if err != nil { + t.Fatalf("Failed start: %s", err) + } +} + +// Helper to stop a container. +// Ones created through hcsoci methods will be of type *hcs.System. +// Ones created through hcsshim methods will be of type hcsshim.Container +func stopContainer(t *testing.T, c interface{}) { + + switch c.(type) { + case *hcs.System: + if err := c.(*hcs.System).Shutdown(); err != nil { + if hcsshim.IsPending(err) { + if err := c.(*hcs.System).Wait(); err != nil { + t.Fatalf("Failed Wait shutdown: %s", err) + } + } else { + t.Fatalf("Failed shutdown: %s", err) + } + } + c.(*hcs.System).Terminate() + + case hcsshim.Container: + if err := c.(hcsshim.Container).Shutdown(); err != nil { + if hcsshim.IsPending(err) { + if err := c.(hcsshim.Container).Wait(); err != nil { + t.Fatalf("Failed Wait shutdown: %s", err) + } + } else { + t.Fatalf("Failed shutdown: %s", err) + } + } + c.(hcsshim.Container).Terminate() + default: + t.Fatalf("unknown type") + } +} + +// Helper to launch a process in a container created through the hcsshim methods. +// At the point of calling, the container must have been successfully created. 
+func runShimCommand(t *testing.T, + c hcsshim.Container, + command string, + workdir string, + expectedExitCode int, + expectedOutput string) { + + if c == nil { + t.Fatalf("requested container to start is nil!") + } + p, err := c.CreateProcess(&hcsshim.ProcessConfig{ + CommandLine: command, + WorkingDirectory: workdir, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: true, + }) + if err != nil { + t.Fatalf("Failed Create Process: %s", err) + + } + defer p.Close() + if err := p.Wait(); err != nil { + t.Fatalf("Failed Wait Process: %s", err) + } + exitCode, err := p.ExitCode() + if err != nil { + t.Fatalf("Failed to obtain process exit code: %s", err) + } + if exitCode != expectedExitCode { + t.Fatalf("Exit code from %s wasn't %d (%d)", command, expectedExitCode, exitCode) + } + _, o, _, err := p.Stdio() + if err != nil { + t.Fatalf("Failed to get Stdio handles for process: %s", err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(o) + out := strings.TrimSpace(buf.String()) + if expectedOutput != "" { + if out != expectedOutput { + t.Fatalf("Failed to get %q from process: %q", expectedOutput, out) + } + } +} + +func runShimCommands(t *testing.T, c hcsshim.Container) { + runShimCommand(t, c, `echo Hello`, `c:\`, 0, "Hello") + + // Check that read-only doesn't allow deletion or creation + runShimCommand(t, c, `ls c:\mappedro\readonly`, `c:\`, 0, `c:\mappedro\readonly`) + runShimCommand(t, c, `rm c:\mappedro\readonly`, `c:\`, 1, "") + runShimCommand(t, c, `cp readonly fail`, `c:\mappedro`, 1, "") + runShimCommand(t, c, `ls`, `c:\mappedro`, 0, `readonly`) + + // Check that read-write allows new file creation and removal + runShimCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`) + runShimCommand(t, c, `cp readwrite succeeds`, `c:\mappedrw`, 0, "") + runShimCommand(t, c, `ls`, `c:\mappedrw`, 0, "readwrite\nsucceeds") + runShimCommand(t, c, `rm succeeds`, `c:\mappedrw`, 0, "") + runShimCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`) +} + +func runHcsCommands(t *testing.T, c *hcs.System) { + runHcsCommand(t, c, `echo Hello`, `c:\`, 0, "Hello") + + // Check that read-only doesn't allow deletion or creation + runHcsCommand(t, c, `ls c:\mappedro\readonly`, `c:\`, 0, `c:\mappedro\readonly`) + runHcsCommand(t, c, `rm c:\mappedro\readonly`, `c:\`, 1, "") + runHcsCommand(t, c, `cp readonly fail`, `c:\mappedro`, 1, "") + runHcsCommand(t, c, `ls`, `c:\mappedro`, 0, `readonly`) + + // Check that read-write allows new file creation and removal + runHcsCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`) + runHcsCommand(t, c, `cp readwrite succeeds`, `c:\mappedrw`, 0, "") + runHcsCommand(t, c, `ls`, `c:\mappedrw`, 0, "readwrite\nsucceeds") + runHcsCommand(t, c, `rm succeeds`, `c:\mappedrw`, 0, "") + runHcsCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`) +} + +// Helper to launch a process in a container created through the hcsshim methods. +// At the point of calling, the container must have been successfully created. 
+func runHcsCommand(t *testing.T, + c *hcs.System, + command string, + workdir string, + expectedExitCode int, + expectedOutput string) { + + if c == nil { + t.Fatalf("requested container to start is nil!") + } + p, err := c.CreateProcess(&hcsshim.ProcessConfig{ + CommandLine: command, + WorkingDirectory: workdir, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: true, + }) + if err != nil { + t.Fatalf("Failed Create Process: %s", err) + + } + defer p.Close() + if err := p.Wait(); err != nil { + t.Fatalf("Failed Wait Process: %s", err) + } + exitCode, err := p.ExitCode() + if err != nil { + t.Fatalf("Failed to obtain process exit code: %s", err) + } + if exitCode != expectedExitCode { + t.Fatalf("Exit code from %s wasn't %d (%d)", command, expectedExitCode, exitCode) + } + _, o, _, err := p.Stdio() + if err != nil { + t.Fatalf("Failed to get Stdio handles for process: %s", err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(o) + out := strings.TrimSpace(buf.String()) + if expectedOutput != "" { + if out != expectedOutput { + t.Fatalf("Failed to get %q from process: %q", expectedOutput, out) + } + } +} + +// busybox is used as it has lots of layers. Exercises more code. +// Also the commands are more flexible for verification +const imageName = "busyboxw" + +// Creates two temp folders used for the mounts/mapped directories +func createTestMounts(t *testing.T) (string, string) { + // Create two temp folders for mapped directories. + hostRWSharedDirectory := testutilities.CreateTempDir(t) + hostROSharedDirectory := testutilities.CreateTempDir(t) + fRW, _ := os.OpenFile(filepath.Join(hostRWSharedDirectory, "readwrite"), os.O_RDWR|os.O_CREATE, 0755) + fRO, _ := os.OpenFile(filepath.Join(hostROSharedDirectory, "readonly"), os.O_RDWR|os.O_CREATE, 0755) + fRW.Close() + fRO.Close() + return hostRWSharedDirectory, hostROSharedDirectory +} + +// For calling hcsshim interface, need hcsshim.Layer built from an images layer folders +func generateShimLayersStruct(t *testing.T, imageLayers []string) []hcsshim.Layer { + var layers []hcsshim.Layer + for _, layerFolder := range imageLayers { + guid, _ := wclayer.NameToGuid(filepath.Base(layerFolder)) + layers = append(layers, hcsshim.Layer{Path: layerFolder, ID: guid.String()}) + } + return layers +} + +// Argon through HCSShim interface (v1) +func TestWCOWArgonShim(t *testing.T) { + imageLayers := testutilities.LayerFolders(t, imageName) + argonShimMounted := false + + argonShimScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(argonShimScratchDir) + if err := wclayer.CreateScratchLayer(argonShimScratchDir, imageLayers); err != nil { + t.Fatalf("failed to create argon scratch layer: %s", err) + } + + hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t) + defer os.RemoveAll(hostRWSharedDirectory) + defer os.RemoveAll(hostROSharedDirectory) + + layers := generateShimLayersStruct(t, imageLayers) + + // For cleanup on failure + defer func() { + if argonShimMounted { + hcsoci.UnmountContainerLayers(append(imageLayers, argonShimScratchDir), "", nil, hcsoci.UnmountOperationAll) + } + }() + + // This is a cheat but stops us re-writing exactly the same code just for test + argonShimLocalMountPath, err := hcsoci.MountContainerLayers(append(imageLayers, argonShimScratchDir), "", nil) + if err != nil { + t.Fatal(err) + } + argonShimMounted = true + argonShim, err := hcsshim.CreateContainer("argon", &hcsshim.ContainerConfig{ + SystemType: "Container", + Name: "argonShim", + VolumePath: argonShimLocalMountPath.(string), 
+ LayerFolderPath: argonShimScratchDir, + Layers: layers, + MappedDirectories: []schema1.MappedDir{ + { + HostPath: hostROSharedDirectory, + ContainerPath: `c:\mappedro`, + ReadOnly: true, + }, + { + HostPath: hostRWSharedDirectory, + ContainerPath: `c:\mappedrw`, + }, + }, + HvRuntime: nil, + }) + if err != nil { + t.Fatal(err) + } + startContainer(t, argonShim) + runShimCommands(t, argonShim) + stopContainer(t, argonShim) + if err := hcsoci.UnmountContainerLayers(append(imageLayers, argonShimScratchDir), "", nil, hcsoci.UnmountOperationAll); err != nil { + t.Fatal(err) + } + argonShimMounted = false + +} + +// Xenon through HCSShim interface (v1) +func TestWCOWXenonShim(t *testing.T) { + imageLayers := testutilities.LayerFolders(t, imageName) + + xenonShimScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(xenonShimScratchDir) + if err := wclayer.CreateScratchLayer(xenonShimScratchDir, imageLayers); err != nil { + t.Fatalf("failed to create xenon scratch layer: %s", err) + } + + hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t) + defer os.RemoveAll(hostRWSharedDirectory) + defer os.RemoveAll(hostROSharedDirectory) + + uvmImagePath, err := uvmfolder.LocateUVMFolder(imageLayers) + if err != nil { + t.Fatalf("LocateUVMFolder failed %s", err) + } + + layers := generateShimLayersStruct(t, imageLayers) + + xenonShim, err := hcsshim.CreateContainer("xenon", &hcsshim.ContainerConfig{ + SystemType: "Container", + Name: "xenonShim", + LayerFolderPath: xenonShimScratchDir, + Layers: layers, + HvRuntime: &hcsshim.HvRuntime{ImagePath: filepath.Join(uvmImagePath, "UtilityVM")}, + HvPartition: true, + MappedDirectories: []schema1.MappedDir{ + { + HostPath: hostROSharedDirectory, + ContainerPath: `c:\mappedro`, + ReadOnly: true, + }, + { + HostPath: hostRWSharedDirectory, + ContainerPath: `c:\mappedrw`, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + startContainer(t, xenonShim) + runShimCommands(t, xenonShim) + stopContainer(t, xenonShim) +} + +func generateWCOWOciTestSpec(t *testing.T, imageLayers []string, scratchPath, hostRWSharedDirectory, hostROSharedDirectory string) *specs.Spec { + return &specs.Spec{ + Windows: &specs.Windows{ + LayerFolders: append(imageLayers, scratchPath), + }, + Mounts: []specs.Mount{ + { + Source: hostROSharedDirectory, + Destination: `c:\mappedro`, + Options: []string{"ro"}, + }, + { + Source: hostRWSharedDirectory, + Destination: `c:\mappedrw`, + }, + }, + } +} + +// Argon through HCSOCI interface (v1) +func TestWCOWArgonOciV1(t *testing.T) { + imageLayers := testutilities.LayerFolders(t, imageName) + argonOci1Mounted := false + argonOci1ScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(argonOci1ScratchDir) + if err := wclayer.CreateScratchLayer(argonOci1ScratchDir, imageLayers); err != nil { + t.Fatalf("failed to create argon scratch layer: %s", err) + } + + hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t) + defer os.RemoveAll(hostRWSharedDirectory) + defer os.RemoveAll(hostROSharedDirectory) + + // For cleanup on failure + var argonOci1Resources *hcsoci.Resources + var argonOci1 *hcs.System + defer func() { + if argonOci1Mounted { + hcsoci.ReleaseResources(argonOci1Resources, nil, true) + } + }() + + var err error + spec := generateWCOWOciTestSpec(t, imageLayers, argonOci1ScratchDir, hostRWSharedDirectory, hostROSharedDirectory) + argonOci1, argonOci1Resources, err = hcsoci.CreateContainer( + &hcsoci.CreateOptions{ + ID: "argonOci1", + SchemaVersion: schemaversion.SchemaV10(), + Spec: spec, + }) + 
if err != nil { + t.Fatal(err) + } + argonOci1Mounted = true + startContainer(t, argonOci1) + runHcsCommands(t, argonOci1) + stopContainer(t, argonOci1) + if err := hcsoci.ReleaseResources(argonOci1Resources, nil, true); err != nil { + t.Fatal(err) + } + argonOci1Mounted = false +} + +// Xenon through HCSOCI interface (v1) +func TestWCOWXenonOciV1(t *testing.T) { + imageLayers := testutilities.LayerFolders(t, imageName) + xenonOci1Mounted := false + + xenonOci1ScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(xenonOci1ScratchDir) + if err := wclayer.CreateScratchLayer(xenonOci1ScratchDir, imageLayers); err != nil { + t.Fatalf("failed to create xenon scratch layer: %s", err) + } + + hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t) + defer os.RemoveAll(hostRWSharedDirectory) + defer os.RemoveAll(hostROSharedDirectory) + + // TODO: This isn't currently used. + // uvmImagePath, err := uvmfolder.LocateUVMFolder(imageLayers) + // if err != nil { + // t.Fatalf("LocateUVMFolder failed %s", err) + // } + + // For cleanup on failure + var xenonOci1Resources *hcsoci.Resources + var xenonOci1 *hcs.System + defer func() { + if xenonOci1Mounted { + hcsoci.ReleaseResources(xenonOci1Resources, nil, true) + } + }() + + var err error + spec := generateWCOWOciTestSpec(t, imageLayers, xenonOci1ScratchDir, hostRWSharedDirectory, hostROSharedDirectory) + spec.Windows.HyperV = &specs.WindowsHyperV{} + xenonOci1, xenonOci1Resources, err = hcsoci.CreateContainer( + &hcsoci.CreateOptions{ + ID: "xenonOci1", + SchemaVersion: schemaversion.SchemaV10(), + Spec: spec, + }) + if err != nil { + t.Fatal(err) + } + xenonOci1Mounted = true + startContainer(t, xenonOci1) + runHcsCommands(t, xenonOci1) + stopContainer(t, xenonOci1) + if err := hcsoci.ReleaseResources(xenonOci1Resources, nil, true); err != nil { + t.Fatal(err) + } + xenonOci1Mounted = false +} + +// Argon through HCSOCI interface (v2) +func TestWCOWArgonOciV2(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + imageLayers := testutilities.LayerFolders(t, imageName) + argonOci2Mounted := false + + argonOci2ScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(argonOci2ScratchDir) + if err := wclayer.CreateScratchLayer(argonOci2ScratchDir, imageLayers); err != nil { + t.Fatalf("failed to create argon scratch layer: %s", err) + } + + hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t) + defer os.RemoveAll(hostRWSharedDirectory) + defer os.RemoveAll(hostROSharedDirectory) + + // For cleanup on failure + var argonOci2Resources *hcsoci.Resources + var argonOci2 *hcs.System + defer func() { + if argonOci2Mounted { + hcsoci.ReleaseResources(argonOci2Resources, nil, true) + } + }() + + var err error + spec := generateWCOWOciTestSpec(t, imageLayers, argonOci2ScratchDir, hostRWSharedDirectory, hostROSharedDirectory) + argonOci2, argonOci2Resources, err = hcsoci.CreateContainer( + &hcsoci.CreateOptions{ + ID: "argonOci2", + SchemaVersion: schemaversion.SchemaV21(), + Spec: spec, + }) + if err != nil { + t.Fatal(err) + } + argonOci2Mounted = true + startContainer(t, argonOci2) + runHcsCommands(t, argonOci2) + stopContainer(t, argonOci2) + if err := hcsoci.ReleaseResources(argonOci2Resources, nil, true); err != nil { + t.Fatal(err) + } + argonOci2Mounted = false + +} + +// Xenon through HCSOCI interface (v2) +func TestWCOWXenonOciV2(t *testing.T) { + testutilities.RequiresBuild(t, osversion.RS5) + imageLayers := testutilities.LayerFolders(t, imageName) + xenonOci2Mounted := false + 
xenonOci2UVMCreated := false + + xenonOci2ScratchDir := testutilities.CreateTempDir(t) + defer os.RemoveAll(xenonOci2ScratchDir) + if err := wclayer.CreateScratchLayer(xenonOci2ScratchDir, imageLayers); err != nil { + t.Fatalf("failed to create xenon scratch layer: %s", err) + } + + hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t) + defer os.RemoveAll(hostRWSharedDirectory) + defer os.RemoveAll(hostROSharedDirectory) + + uvmImagePath, err := uvmfolder.LocateUVMFolder(imageLayers) + if err != nil { + t.Fatalf("LocateUVMFolder failed %s", err) + } + + var xenonOci2Resources *hcsoci.Resources + var xenonOci2 *hcs.System + var xenonOci2UVM *uvm.UtilityVM + defer func() { + if xenonOci2Mounted { + hcsoci.ReleaseResources(xenonOci2Resources, xenonOci2UVM, true) + } + if xenonOci2UVMCreated { + xenonOci2UVM.Close() + } + }() + + // Create the utility VM. + xenonOci2UVMId := "xenonOci2UVM" + xenonOci2UVMScratchDir := testutilities.CreateTempDir(t) + if err := wcow.CreateUVMScratch(uvmImagePath, xenonOci2UVMScratchDir, xenonOci2UVMId); err != nil { + t.Fatalf("failed to create scratch: %s", err) + } + + xenonOciOpts := uvm.NewDefaultOptionsWCOW(xenonOci2UVMId, "") + xenonOciOpts.LayerFolders = append(imageLayers, xenonOci2UVMScratchDir) + xenonOci2UVM, err = uvm.CreateWCOW(xenonOciOpts) + if err != nil { + t.Fatalf("Failed create UVM: %s", err) + } + xenonOci2UVMCreated = true + if err := xenonOci2UVM.Start(); err != nil { + xenonOci2UVM.Close() + t.Fatalf("Failed start UVM: %s", err) + + } + + spec := generateWCOWOciTestSpec(t, imageLayers, xenonOci2ScratchDir, hostRWSharedDirectory, hostROSharedDirectory) + xenonOci2, xenonOci2Resources, err = hcsoci.CreateContainer( + &hcsoci.CreateOptions{ + ID: "xenonOci2", + HostingSystem: xenonOci2UVM, + SchemaVersion: schemaversion.SchemaV21(), + Spec: spec, + }) + if err != nil { + t.Fatal(err) + } + xenonOci2Mounted = true + startContainer(t, xenonOci2) + runHcsCommands(t, xenonOci2) + stopContainer(t, xenonOci2) + if err := hcsoci.ReleaseResources(xenonOci2Resources, xenonOci2UVM, true); err != nil { + t.Fatal(err) + } + xenonOci2Mounted = false + + // Terminate the UVM + xenonOci2UVM.Close() + xenonOci2UVMCreated = false +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/functional/wcow_xenon_v2_test.go b/vendor/github.com/Microsoft/hcsshim/test/functional/wcow_xenon_v2_test.go new file mode 100644 index 00000000..69e0e681 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/functional/wcow_xenon_v2_test.go @@ -0,0 +1,51 @@ +// xxxxbuild functional wcow wcowv2 wcowv2xenon + +package functional + +//import ( +// "os" +// "testing" + +// "github.com/Microsoft/hcsshim/test/functional/utilities" +// "github.com/Microsoft/hcsshim/internal/guid" +// "github.com/Microsoft/hcsshim/internal/hcsoci" +// "github.com/Microsoft/hcsshim/osversion" +// "github.com/Microsoft/hcsshim/internal/schemaversion" +// "github.com/Microsoft/hcsshim/internal/uvm" +// "github.com/Microsoft/hcsshim/internal/uvmfolder" +// "github.com/Microsoft/hcsshim/internal/wclayer" +// "github.com/Microsoft/hcsshim/internal/wcow" +// specs "github.com/opencontainers/runtime-spec/specs-go" +//) + +// TODO. This might be worth porting. +//// Lots of v2 WCOW containers in the same UVM, each with a single base layer. Containers aren't +//// actually started, but it stresses the SCSI controller hot-add logic. 
+//func TestV2XenonWCOWCreateLots(t *testing.T) { +// t.Skip("Skipping for now") +// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateLots", nil) +// defer os.RemoveAll(uvmScratchDir) +// defer uvm.Close() + +// // 63 as 0:0 is already taken as the UVMs scratch. So that leaves us with 64-1 left for container scratches on SCSI +// for i := 0; i < 63; i++ { +// containerScratchDir := createWCOWTempDirWithSandbox(t) +// defer os.RemoveAll(containerScratchDir) +// layerFolders := append(layersNanoserver, containerScratchDir) +// hostedContainer, err := CreateContainer(&CreateOptions{ +// Id: fmt.Sprintf("container%d", i), +// HostingSystem: uvm, +// SchemaVersion: schemaversion.SchemaV21(), +// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}}, +// }) +// if err != nil { +// t.Fatalf("CreateContainer failed: %s", err) +// } +// defer hostedContainer.Terminate() +// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll) +// } + +// // TODO: Should check the internal structures here for VSMB and SCSI + +// // TODO: Push it over 63 now and will get a failure. +//} diff --git a/vendor/github.com/Microsoft/hcsshim/test/runhcs/create-scratch_test.go b/vendor/github.com/Microsoft/hcsshim/test/runhcs/create-scratch_test.go new file mode 100644 index 00000000..b5768bcf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/runhcs/create-scratch_test.go @@ -0,0 +1,67 @@ +// +build integration + +package runhcs + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + + runhcs "github.com/Microsoft/hcsshim/pkg/go-runhcs" +) + +func Test_CreateScratch_EmptyDestpath_Fail(t *testing.T) { + rhcs := runhcs.Runhcs{ + Debug: true, + } + + ctx := context.TODO() + err := rhcs.CreateScratch(ctx, "") + if err == nil { + t.Fatal("Should have failed 'CreateScratch' command.") + } +} + +func Test_CreateScratch_DirDestpath_Failure(t *testing.T) { + rhcs := runhcs.Runhcs{ + Debug: true, + } + + td, err := ioutil.TempDir("", "CreateScratch") + if err != nil { + t.Fatal(err) + } + defer os.Remove(td) + + ctx := context.TODO() + err = rhcs.CreateScratch(ctx, td) + if err == nil { + t.Fatal("Should have failed 'CreateScratch' command with dir destpath") + } +} + +func Test_CreateScratch_ValidDestpath_Success(t *testing.T) { + rhcs := runhcs.Runhcs{ + Debug: true, + } + + td, err := ioutil.TempDir("", "CreateScratch") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + scratchPath := filepath.Join(td, "scratch.vhdx") + + ctx := context.TODO() + err = rhcs.CreateScratch(ctx, scratchPath) + if err != nil { + t.Fatalf("Failed 'CreateScratch' command with: %v", err) + } + _, err = os.Stat(scratchPath) + if err != nil { + t.Fatalf("Failed to stat scratch path with: %v", err) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/runhcs/e2e_matrix_test.go b/vendor/github.com/Microsoft/hcsshim/test/runhcs/e2e_matrix_test.go new file mode 100644 index 00000000..7a8684e1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/runhcs/e2e_matrix_test.go @@ -0,0 +1,391 @@ +// +build integration + +package runhcs + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + "testing" + + "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/osversion" + runhcs "github.com/Microsoft/hcsshim/pkg/go-runhcs" + testutilities "github.com/Microsoft/hcsshim/test/functional/utilities" + runc "github.com/containerd/go-runc" + 
"github.com/opencontainers/runtime-tools/generate" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +// These support matrix of runhcs.exe for end to end activations is quite +// complex. These tests attempt to codify a simple start test on each support +// host/guest/isolation type so that we can have at least minimal confidence +// when changing code that activations across all platforms still work. +// +// Host OS | Container OS | Isolation +// +// RS1 | RS1 | V1 - Argon, Xenon +// +// RS3 | RS1 | V1 - Xenon +// | RS3 | V1 - Argon, Xenon +// +// RS4 | RS1, RS3 | V1 - Xenon +// | RS4 | V1 - Argon, Xenon +// +// RS5 | RS1, RS3, RS4 | V2 - UVM + Argon +// | RS5 | V2 - Argon, UVM + Argon, UVM + Argon (s) (POD's) +// | LCOW | V2 - UVM + Linux Container, UVM + Linux Container (s) (POD's) + +var _ = (runc.IO)(&testIO{}) + +type testIO struct { + g *errgroup.Group + + or, ow *os.File + outBuff *bytes.Buffer + + er, ew *os.File + errBuff *bytes.Buffer +} + +func newTestIO(t *testing.T) *testIO { + var err error + tio := &testIO{ + outBuff: &bytes.Buffer{}, + errBuff: &bytes.Buffer{}, + } + defer func() { + if err != nil { + tio.Close() + } + }() + + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("failed to create stdout pipes: %v", err) + } + tio.or, tio.ow = r, w + r, w, err = os.Pipe() + if err != nil { + t.Fatalf("failed to create stderr pipes: %v", err) + } + tio.er, tio.ew = r, w + + g, _ := errgroup.WithContext(context.TODO()) + tio.g = g + tio.g.Go(func() error { + _, err := io.Copy(tio.outBuff, tio.Stdout()) + return err + }) + tio.g.Go(func() error { + _, err := io.Copy(tio.errBuff, tio.Stderr()) + return err + }) + return tio +} + +func (t *testIO) Stdin() io.WriteCloser { + return nil +} + +func (t *testIO) Stdout() io.ReadCloser { + return t.or +} + +func (t *testIO) Stderr() io.ReadCloser { + return t.er +} + +func (t *testIO) Set(cmd *exec.Cmd) { + cmd.Stdout = t.ow + cmd.Stderr = t.ew +} + +func (t *testIO) Close() error { + var err error + for _, v := range []*os.File{ + t.ow, t.ew, + t.or, t.er, + } { + if cerr := v.Close(); err == nil { + err = cerr + } + } + return err +} + +func (t *testIO) CloseAfterStart() error { + t.ow.Close() + t.ew.Close() + return nil +} + +func (t *testIO) Wait() error { + return t.g.Wait() +} + +func getWindowsImageNameByVersion(t *testing.T, bv int) string { + switch bv { + case osversion.RS1: + return "mcr.microsoft.com/windows/nanoserver:sac2016" + case osversion.RS3: + return "mcr.microsoft.com/windows/nanoserver:1709" + case osversion.RS4: + return "mcr.microsoft.com/windows/nanoserver:1803" + case osversion.RS5: + // testImage = "mcr.microsoft.com/windows/nanoserver:1809" + return "mcr.microsoft.com/windows/nanoserver/insider:10.0.17763.55" + default: + t.Fatalf("unsupported build (%d) for Windows containers", bv) + } + // Won't hit because of t.Fatal + return "" +} + +func readPidFile(path string) (int, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return -1, errors.Wrap(err, "failed to read pidfile") + } + p, err := strconv.Atoi(string(data)) + if err != nil { + return -1, errors.Wrap(err, "pidfile failed to parse pid") + } + return p, nil +} + +func testWindows(t *testing.T, version int, isolated bool) { + var err error + + // Make the bundle + bundle := testutilities.CreateTempDir(t) + defer func() { + if err == nil { + os.RemoveAll(bundle) + } else { + t.Errorf("additional logs at bundle path: %v", bundle) + } + }() + scratch := testutilities.CreateTempDir(t) + defer func() { + 
vhd.DetachVhd(filepath.Join(scratch, "sandbox.vhdx")) + os.RemoveAll(scratch) + }() + + // Generate the Spec + g, err := generate.New("windows") + if err != nil { + t.Errorf("failed to generate Windows config with error: %v", err) + return + } + g.SetProcessArgs([]string{"cmd", "/c", "echo Hello World!"}) + if isolated { + g.SetWindowsHypervUntilityVMPath("") + } + g.Config.Windows.Network = nil + + // Get the LayerFolders + imageName := getWindowsImageNameByVersion(t, version) + layers := testutilities.LayerFolders(t, imageName) + for _, layer := range layers { + g.AddWindowsLayerFolders(layer) + } + g.AddWindowsLayerFolders(scratch) + + cf, err := os.Create(filepath.Join(bundle, "config.json")) + if err != nil { + t.Errorf("failed to create config.json with error: %v", err) + return + } + err = json.NewEncoder(cf).Encode(g.Config) + if err != nil { + cf.Close() + t.Errorf("failed to encode config.json with error: %v", err) + return + } + cf.Close() + + // Create the Argon, Xenon, or UVM + ctx := context.TODO() + rhcs := runhcs.Runhcs{ + Debug: true, + } + tio := newTestIO(t) + defer func() { + if err != nil { + t.Errorf("additional info stdout: '%v', stderr: '%v'", tio.outBuff.String(), tio.errBuff.String()) + } + }() + defer func() { + tio.Close() + }() + copts := &runhcs.CreateOpts{ + IO: tio, + PidFile: filepath.Join(bundle, "pid-file.txt"), + ShimLog: filepath.Join(bundle, "shim-log.txt"), + } + if isolated { + copts.VMLog = filepath.Join(bundle, "vm-log.txt") + } + err = rhcs.Create(ctx, t.Name(), bundle, copts) + if err != nil { + t.Errorf("failed to create container with error: %v", err) + return + } + defer func() { + rhcs.Delete(ctx, t.Name(), &runhcs.DeleteOpts{Force: true}) + }() + + // Find the shim/vmshim process and begin exit wait + pid, err := readPidFile(copts.PidFile) + if err != nil { + t.Errorf("failed to read pidfile with error: %v", err) + return + } + p, err := os.FindProcess(pid) + if err != nil { + t.Errorf("failed to find container process by pid: %d, with error: %v", pid, err) + return + } + + // Start the container + err = rhcs.Start(ctx, t.Name()) + if err != nil { + t.Errorf("failed to start container with error: %v", err) + return + } + defer func() { + if err != nil { + rhcs.Kill(ctx, t.Name(), "CtrlC") + } + }() + + // Wait for process exit, verify the exited state + var exitStatus int + _, eerr := p.Wait() + if eerr != nil { + if exitErr, ok := eerr.(*exec.ExitError); ok { + if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitStatus = ws.ExitStatus() + } + } + } + if exitStatus != 0 { + err = eerr + t.Errorf("container process failed with exit status: %d", exitStatus) + return + } + + // Wait for the relay to exit + tio.Wait() + outString := tio.outBuff.String() + if outString != "Hello World!\r\n" { + t.Errorf("stdout expected: 'Hello World!', got: '%v'", outString) + } + + errString := tio.errBuff.String() + if errString != "" { + t.Errorf("stderr expected: '', got: '%v'", errString) + } +} + +func testWindowsPod(t *testing.T, version int, isolated bool) { + t.Skip("not implemented") +} + +func testLCOW(t *testing.T) { + t.Skip("not implemented") +} + +func testLCOWPod(t *testing.T) { + t.Skip("not implemented") +} + +func Test_RS1_Argon(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS1) + + testWindows(t, osversion.RS1, false) +} + +func Test_RS1_Xenon(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS1) + + testWindows(t, osversion.RS1, true) +} + +func Test_RS3_Argon(t *testing.T) { + 
testutilities.RequiresExactBuild(t, osversion.RS3) + + testWindows(t, osversion.RS3, false) +} + +func Test_RS3_Xenon(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS3) + + guests := []int{osversion.RS1, osversion.RS3} + for _, g := range guests { + testWindows(t, g, true) + } +} + +func Test_RS4_Argon(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS4) + + testWindows(t, osversion.RS4, false) +} + +func Test_RS4_Xenon(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS4) + + guests := []int{osversion.RS1, osversion.RS3, osversion.RS4} + for _, g := range guests { + testWindows(t, g, true) + } +} + +func Test_RS5_Argon(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS5) + + testWindows(t, osversion.RS5, false) +} + +func Test_RS5_ArgonPods(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS5) + + testWindowsPod(t, osversion.RS5, false) +} + +func Test_RS5_UVMAndContainer(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS5) + + guests := []int{osversion.RS1, osversion.RS3, osversion.RS4, osversion.RS5} + for _, g := range guests { + testWindows(t, g, true) + } +} + +func Test_RS5_UVMPods(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS5) + + testWindowsPod(t, osversion.RS5, true) +} + +func Test_RS5_LCOW(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS5) + + testLCOW(t) +} + +func Test_RS5_LCOW_UVMPods(t *testing.T) { + testutilities.RequiresExactBuild(t, osversion.RS5) + + testLCOWPod(t) +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/runhcs/list_test.go b/vendor/github.com/Microsoft/hcsshim/test/runhcs/list_test.go new file mode 100644 index 00000000..c0036aee --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/runhcs/list_test.go @@ -0,0 +1,25 @@ +// +build integration + +package runhcs + +import ( + "context" + "testing" + + runhcs "github.com/Microsoft/hcsshim/pkg/go-runhcs" +) + +func Test_List_NoContainers(t *testing.T) { + rhcs := runhcs.Runhcs{ + Debug: true, + } + + ctx := context.TODO() + cs, err := rhcs.List(ctx) + if err != nil { + t.Fatalf("Failed 'List' command with: %v", err) + } + if len(cs) != 0 { + t.Fatalf("Length of ContainerState array expected: 0, actual: %d", len(cs)) + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/test/runhcs/runhcs_test.go b/vendor/github.com/Microsoft/hcsshim/test/runhcs/runhcs_test.go new file mode 100644 index 00000000..c59a9c99 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/test/runhcs/runhcs_test.go @@ -0,0 +1,7 @@ +// +build integration + +package runhcs + +import ( + _ "github.com/Microsoft/hcsshim/test/functional/manifest" +) diff --git a/vendor/github.com/Microsoft/hcsshim/tools/uvmboot/main.go b/vendor/github.com/Microsoft/hcsshim/tools/uvmboot/main.go new file mode 100644 index 00000000..0cc4e043 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/tools/uvmboot/main.go @@ -0,0 +1,262 @@ +package main + +import ( + "fmt" + "io" + "os" + "strings" + "sync" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/sirupsen/logrus" + "github.com/urfave/cli" +) + +const ( + kernelArgsArgName = "kernel-args" + rootFSTypeArgName = "root-fs-type" + vpMemMaxCountArgName = "vpmem-max-count" + vpMemMaxSizeArgName = "vpmem-max-size" + cpusArgName = "cpus" + memoryArgName = "memory" + allowOvercommitArgName = "allow-overcommit" + enableDeferredCommitArgName = "enable-deferred-commit" + measureArgName = "measure" + 
parallelArgName = "parallel" + countArgName = "count" + kernelDirectArgName = "kernel-direct" + execCommandLineArgName = "exec" + forwardStdoutArgName = "fwd-stdout" + forwardStderrArgName = "fwd-stderr" + debugArgName = "debug" + outputHandlingArgName = "output-handling" + consolePipeArgName = "console-pipe" +) + +func main() { + app := cli.NewApp() + app.Name = "uvmboot" + app.Usage = "Boot a utility VM" + + app.Flags = []cli.Flag{ + cli.Uint64Flag{ + Name: cpusArgName, + Usage: "Number of CPUs on the UVM. Uses hcsshim default if not specified", + }, + cli.UintFlag{ + Name: memoryArgName, + Usage: "Amount of memory on the UVM, in MB. Uses hcsshim default if not specified", + }, + cli.BoolFlag{ + Name: measureArgName, + Usage: "Measure wall clock time of the UVM run", + }, + cli.IntFlag{ + Name: parallelArgName, + Value: 1, + Usage: "Number of UVMs to boot in parallel", + }, + cli.IntFlag{ + Name: countArgName, + Value: 1, + Usage: "Total number of UVMs to run", + }, + cli.BoolFlag{ + Name: allowOvercommitArgName, + Usage: "Allow memory overcommit on the UVM", + }, + cli.BoolFlag{ + Name: enableDeferredCommitArgName, + Usage: "Enable deferred commit on the UVM", + }, + cli.BoolFlag{ + Name: debugArgName, + Usage: "Enable debug level logging in HCSShim", + }, + } + + app.Commands = []cli.Command{ + { + Name: "lcow", + Usage: "Boot an LCOW UVM", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: kernelArgsArgName, + Value: "", + Usage: "Additional arguments to pass to the kernel", + }, + cli.StringFlag{ + Name: rootFSTypeArgName, + Usage: "Either 'initrd' or 'vhd'. (default: 'vhd' if rootfs.vhd exists)", + }, + cli.UintFlag{ + Name: vpMemMaxCountArgName, + Usage: "Number of VPMem devices on the UVM. Uses hcsshim default if not specified", + }, + cli.Uint64Flag{ + Name: vpMemMaxSizeArgName, + Usage: "Size of each VPMem device, in MB. Uses hcsshim default if not specified", + }, + cli.BoolFlag{ + Name: kernelDirectArgName, + Usage: "Use kernel direct booting for UVM (default: true on builds >= 18286)", + }, + cli.StringFlag{ + Name: execCommandLineArgName, + Usage: "Command to execute in the UVM.", + }, + cli.BoolFlag{ + Name: forwardStdoutArgName, + Usage: "Whether stdout from the process in the UVM should be forwarded", + }, + cli.BoolFlag{ + Name: forwardStderrArgName, + Usage: "Whether stderr from the process in the UVM should be forwarded", + }, + cli.StringFlag{ + Name: outputHandlingArgName, + Usage: "Controls how output from UVM is handled. 
Use 'stdout' to print all output to stdout", + }, + cli.StringFlag{ + Name: consolePipeArgName, + Usage: "Named pipe for serial console output (which will be enabled)", + }, + }, + Action: func(c *cli.Context) error { + if c.GlobalBool("debug") { + logrus.SetLevel(logrus.DebugLevel) + } + + parallelCount := c.GlobalInt(parallelArgName) + + var wg sync.WaitGroup + wg.Add(parallelCount) + + workChan := make(chan int) + + runFunc := func(workChan <-chan int) { + for { + i, ok := <-workChan + + if !ok { + wg.Done() + return + } + + id := fmt.Sprintf("uvmboot-%d", i) + + options := uvm.NewDefaultOptionsLCOW(id, "") + options.UseGuestConnection = false + + if c.GlobalIsSet(cpusArgName) { + options.ProcessorCount = int32(c.GlobalUint64(cpusArgName)) + } + if c.GlobalIsSet(memoryArgName) { + options.MemorySizeInMB = int32(c.GlobalUint64(memoryArgName)) + } + if c.GlobalIsSet(allowOvercommitArgName) { + options.AllowOvercommit = c.GlobalBool(allowOvercommitArgName) + } + if c.GlobalIsSet(enableDeferredCommitArgName) { + options.EnableDeferredCommit = c.GlobalBool(enableDeferredCommitArgName) + } + + if c.IsSet(kernelDirectArgName) { + options.KernelDirect = c.Bool(kernelDirectArgName) + } + if c.IsSet(rootFSTypeArgName) { + switch strings.ToLower(c.String(rootFSTypeArgName)) { + case "initrd": + options.RootFSFile = uvm.InitrdFile + options.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd + case "vhd": + options.RootFSFile = uvm.VhdFile + options.PreferredRootFSType = uvm.PreferredRootFSTypeVHD + default: + logrus.Fatalf("Unrecognized value '%s' for option %s", c.String(rootFSTypeArgName), rootFSTypeArgName) + } + } + if c.IsSet(kernelArgsArgName) { + options.KernelBootOptions = c.String(kernelArgsArgName) + } + if c.IsSet(vpMemMaxCountArgName) { + options.VPMemDeviceCount = uint32(c.Uint(vpMemMaxCountArgName)) + } + if c.IsSet(vpMemMaxSizeArgName) { + options.VPMemSizeBytes = c.Uint64(vpMemMaxSizeArgName) * 1024 * 1024 // convert from MB to bytes + } + if c.IsSet(execCommandLineArgName) { + options.ExecCommandLine = c.String(execCommandLineArgName) + } + if c.IsSet(forwardStdoutArgName) { + options.ForwardStdout = c.Bool(forwardStdoutArgName) + } + if c.IsSet(forwardStderrArgName) { + options.ForwardStderr = c.Bool(forwardStderrArgName) + } + if c.IsSet(outputHandlingArgName) { + switch strings.ToLower(c.String(outputHandlingArgName)) { + case "stdout": + options.OutputHandler = uvm.OutputHandler(func(r io.Reader) { io.Copy(os.Stdout, r) }) + default: + logrus.Fatalf("Unrecognized value '%s' for option %s", c.String(outputHandlingArgName), outputHandlingArgName) + } + } + if c.IsSet(consolePipeArgName) { + options.ConsolePipe = c.String(consolePipeArgName) + } + + if err := run(options); err != nil { + logrus.WithField("uvm-id", id).Error(err) + } + } + } + + for i := 0; i < parallelCount; i++ { + go runFunc(workChan) + } + + start := time.Now() + + for i := 0; i < c.GlobalInt(countArgName); i++ { + workChan <- i + } + + close(workChan) + + wg.Wait() + + if c.GlobalBool(measureArgName) { + fmt.Println("Elapsed time:", time.Since(start)) + } + + return nil + }, + }, + } + + err := app.Run(os.Args) + if err != nil { + logrus.Fatal(err) + } +} + +func run(options *uvm.OptionsLCOW) error { + uvm, err := uvm.CreateLCOW(options) + if err != nil { + return err + } + defer uvm.Close() + + if err := uvm.Start(); err != nil { + return err + } + + if err := uvm.WaitExpectedError(hcs.ErrVmcomputeUnexpectedExit); err != nil { + return err + } + + return nil +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/tools/uvmboot/resource_windows_386.syso b/vendor/github.com/Microsoft/hcsshim/tools/uvmboot/resource_windows_386.syso new file mode 100644 index 0000000000000000000000000000000000000000..b4320575e4c5c95bfe35813298ae82a61e87fa35 GIT binary patch literal 968 zcma)5!HU#C5UuPE$O?joJ$P(`*Pcmq1~W@$M?@C}#9iEVJW40&>@+0ZA)OgrMA%R8 z3&bxF5wCuee;~dj6UaIqtfacCt6sgTbdnd2L$!S0Cu+TcGc0WxH_>*9kPG2{hOK1} z`lPw*T_<~V7cL{Zz6W~-3)2&);Sc^1KN#!;QP<*EJ1K{1{3js3Ko8rY;Sv3S|FyS2 zbjFA->73qTeNM07E$Eb9V!yz80>74@gI~Yfr|*heJ8!Z?16*BZ8=?x11HOt%Ql#k; zY>G0HVAhn9K^|uSj7MYWw6RaI5(RXC_!%kewVoU(Vdu7biS$ znXQCQL}s*{xmVJ<^W!_WLt(AVqwI=-K--zSF7=U(S27ot=PE9ZHEG3TlOGA24>!Kc za-mf!ZME!o0~LdWiL`O4iV9;KmutO>Z8+-3wlMG&qEeB{RP`UNZ@}vzw(Xjhq9`&I zgAFZP`Z`=oc%O6jM(M=7v&{Dx=YIq2x+qLp$>j8$Es~l0A&_4BSojZlltz#EIF$j9 zreea!L7GfF;ZMX=d~`=Wo^Z4~KsdTFG0;G54;q_myw6&0HQ#&d_5aTLtK4ti`X87o Bw2uG) literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/hcsshim/tools/uvmboot/resource_windows_amd64.syso b/vendor/github.com/Microsoft/hcsshim/tools/uvmboot/resource_windows_amd64.syso new file mode 100644 index 0000000000000000000000000000000000000000..d6100a85b94087599ac086568b810e2fba5e51c1 GIT binary patch literal 968 zcma)5!EVz)5FIzU6bXq#4;&YZ>n1TI!l|65LbOsYw4x;((b`@&E7`l2*9lapQcs-u z0>l>}gt+oe`~l+GjwP3HVC>nMoq6+Sytd*-|z1PQP<#CJ1K`M{3js3Ko6Rs;Sv3S|FyF} zaK?x(>4M&3J)u|d7Ia20v0q?4ggKbXJIQnrzWRYuQTZ!mJ1)RBpmzM8qOFHgDe zGFu8Ai_~a2bFZa!=STPN1;SdHh3Pc|fwnVuUFbs_tz;%F&s0ZDHWcMX5rSs`5Wt-+@;{Y}?f>MV_ZB z0vlR3^i{Z$;1TESjnc7sXPGx*oc|55>pV9_DdV#Xwuoo$2VahoXTp2J!z6sl$BFcL zI298<_LF!r65d2i#f>}c@C3u%0fOP3iGc=cd(hb2;(b=qtH1Zg>;9e9SJ|uI`XA_n BwIl!l literal 0 HcmV?d00001 diff --git a/vendor/github.com/Microsoft/hcsshim/vendor.conf b/vendor/github.com/Microsoft/hcsshim/vendor.conf new file mode 100644 index 00000000..6e0ed156 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/vendor.conf @@ -0,0 +1,21 @@ +github.com/blang/semver v3.1.0 +github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23 +github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3 +github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 +github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f +github.com/konsorten/go-windows-terminal-sequences v1.0.1 +github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c +github.com/Microsoft/go-winio 16cfc975803886a5e47c4257a24c8d8c52e178b2 +github.com/Microsoft/opengcs v0.3.9 +github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 +github.com/opencontainers/runtime-tools 1d69bd0f9c39677d0630e50664fbc3154ae61b88 +github.com/pkg/errors v0.8.1 +github.com/sirupsen/logrus v1.3.0 +github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 +github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c +github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6 +github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b +github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874 +golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 +golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f +golang.org/x/sys e5ecc2a6747ce8d4af18ed98b3de5ae30eb3a5bb \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go new file mode 100644 index 00000000..8bed8485 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -0,0 +1,54 @@ +// Code generated 
mksyscall_windows.exe DO NOT EDIT + +package hcsshim + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") +) + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +}
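
For reference, a minimal, hypothetical caller of the generated wrapper above might look like the sketch below; it is not part of this change, and the compartment ID and thread-locking pattern are assumptions for illustration only. Note that the stub already unwraps FACILITY_WIN32 HRESULTs (0x8007xxxx) back to plain Win32 error codes before returning them as a syscall.Errno.

package main

import (
	"fmt"
	"runtime"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// The network compartment is a per-thread property, so pin this goroutine
	// to a single OS thread for the duration of the call.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// 2 is a placeholder compartment ID; real IDs would come from HNS endpoint data.
	if err := hcsshim.SetCurrentThreadCompartmentId(2); err != nil {
		fmt.Println("failed to switch network compartment:", err)
		return
	}
	// Restore the default compartment (1) when done.
	defer hcsshim.SetCurrentThreadCompartmentId(1)

	fmt.Println("thread is now in the target compartment")
}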