Mirror of https://github.com/prometheus-community/windows_exporter.git (synced 2026-02-08 22:16:38 +00:00)

Compare commits (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | df0db7a54f | |
| | bda7dd18cf | |
| | 617d795383 | |
| | b9b8cfd1ca | |
| | 6b98771187 | |
| | a52df7696a | |
125 Gopkg.lock (generated, Normal file)
@@ -0,0 +1,125 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "github.com/StackExchange/wmi"
  packages = ["."]
  revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
  version = "1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/alecthomas/template"
  packages = [
    ".",
    "parse"
  ]
  revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"

[[projects]]
  branch = "master"
  name = "github.com/alecthomas/units"
  packages = ["."]
  revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"

[[projects]]
  branch = "master"
  name = "github.com/beorn7/perks"
  packages = ["quantile"]
  revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"

[[projects]]
  name = "github.com/go-ole/go-ole"
  packages = [
    ".",
    "oleutil"
  ]
  revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
  version = "v1.2.1"

[[projects]]
  name = "github.com/golang/protobuf"
  packages = ["proto"]
  revision = "925541529c1fa6821df4e44ce2723319eb2be768"
  version = "v1.0.0"

[[projects]]
  name = "github.com/matttproud/golang_protobuf_extensions"
  packages = ["pbutil"]
  revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
  version = "v1.0.0"

[[projects]]
  name = "github.com/prometheus/client_golang"
  packages = [
    "prometheus",
    "prometheus/promhttp"
  ]
  revision = "c5b7fccd204277076155f10851dad72b76a49317"
  version = "v0.8.0"

[[projects]]
  branch = "master"
  name = "github.com/prometheus/client_model"
  packages = ["go"]
  revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"

[[projects]]
  branch = "master"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
    "internal/bitbucket.org/ww/goautoneg",
    "log",
    "model",
    "version"
  ]
  revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0"

[[projects]]
  branch = "master"
  name = "github.com/prometheus/procfs"
  packages = [
    ".",
    "internal/util",
    "nfs",
    "xfs"
  ]
  revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349"

[[projects]]
  name = "github.com/sirupsen/logrus"
  packages = ["."]
  revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
  version = "v1.0.5"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ssh/terminal"]
  revision = "182114d582623c1caa54f73de9c7224e23a48487"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = [
    "unix",
    "windows",
    "windows/registry",
    "windows/svc",
    "windows/svc/eventlog"
  ]
  revision = "8c0ece68c28377f4c326d85b94f8df0dace46f80"

[[projects]]
  name = "gopkg.in/alecthomas/kingpin.v2"
  packages = ["."]
  revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
  version = "v2.2.6"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "01d9c528210a99a08cc65af3a40d23a31df0c00193639751917ea1adb7d1ee1c"
  solver-name = "gps-cdcl"
  solver-version = 1
4 Gopkg.toml (Normal file)
@@ -0,0 +1,4 @@
[prune]
  non-go = true
  go-tests = true
  unused-packages = true
@@ -16,10 +16,12 @@ dns | [Win32_PerfRawData_DNS_DNS](https://technet.microsoft.com/en-us/library/cc
iis | [Win32_PerfRawData_W3SVC_WebService](https://msdn.microsoft.com/en-us/library/aa394345) IIS metrics |
logical_disk | [Win32_PerfRawData_PerfDisk_LogicalDisk](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)) metrics (disk I/O) | ✓
net | [Win32_PerfRawData_Tcpip_NetworkInterface](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)) metrics (network interface I/O) | ✓
msmq | [Win32_PerfRawData_MSMQ_MSMQQueue](http://wutils.com/wmi/root/cimv2/win32_perfrawdata_msmq_msmqqueue/) metrics (MSMQ/journal count) |
os | [Win32_OperatingSystem](https://msdn.microsoft.com/en-us/library/aa394239) metrics (memory, processes, users) | ✓
process | [Win32_PerfRawData_PerfProc_Process](https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx) metrics (per-process stats) |
service | [Win32_Service](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx) metrics (service states) | ✓
system | Win32_PerfRawData_PerfOS_System metrics (system calls) | ✓
tcp | [Win32_PerfRawData_Tcpip_TCPv4](https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx) metrics (tcp connections) |
vmware | Performance counters installed by the VMware Guest agent |

The HELP texts show the WMI data source; please see the MSDN documentation for details.
@@ -51,7 +53,7 @@ See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)

## Usage

    go get -u github.com/kardianos/govendor
    go get -u github.com/golang/dep
    go get -u github.com/prometheus/promu
    go get -u github.com/martinlindhe/wmi_exporter
    cd $env:GOPATH/src/github.com/martinlindhe/wmi_exporter

@@ -15,13 +15,12 @@ clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
install:
  - go version
  - set PATH=%GOPATH%\bin;c:\go\bin;%GOPATH%\bin\windows_%GOARCH%;%PATH%
  - go get -u github.com/kardianos/govendor
  - go get -u github.com/prometheus/promu
  - choco install gitversion.portable -y

build_script:
  - ps: gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
  - govendor test -v +local
  - go test -v ./...
  - promu build -v
  - ps: |
      $ErrorActionPreference = "Stop"

132 collector/msmq.go (Normal file)
@@ -0,0 +1,132 @@
// returns data points from Win32_PerfRawData_MSMQ_MSMQQueue
// <add link to documentation here> - Win32_PerfRawData_MSMQ_MSMQQueue class
package collector

import (
	"bytes"
	"flag"
	"log"
	"strings"

	"github.com/StackExchange/wmi"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	Factories["msmq"] = NewMSMQCollector
}

var (
	msmqWhereClause = flag.String("collector.msmq.msmq-where", "", "WQL 'where' clause to use in WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.")
)

// A Win32_PerfRawData_MSMQ_MSMQQueueCollector is a Prometheus collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics
type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
	BytesinJournalQueue    *prometheus.Desc
	BytesinQueue           *prometheus.Desc
	MessagesinJournalQueue *prometheus.Desc
	MessagesinQueue        *prometheus.Desc

	queryWhereClause string
}

// NewMSMQCollector builds a new Win32_PerfRawData_MSMQ_MSMQQueueCollector.
func NewMSMQCollector() (Collector, error) {
	const subsystem = "msmq"

	var wc bytes.Buffer
	if *msmqWhereClause != "" {
		wc.WriteString("WHERE ")
		wc.WriteString(*msmqWhereClause)
	} else {
		log.Println("warning: No where-clause specified for msmq collector. This will generate a very large number of metrics!")
	}
	return &Win32_PerfRawData_MSMQ_MSMQQueueCollector{
		BytesinJournalQueue: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "bytes_in_journal_queue"),
			"Size of queue journal in bytes",
			[]string{"name"},
			nil,
		),
		BytesinQueue: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "bytes_in_queue"),
			"Size of queue in bytes",
			[]string{"name"},
			nil,
		),
		MessagesinJournalQueue: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "messages_in_journal_queue"),
			"Count messages in queue journal",
			[]string{"name"},
			nil,
		),
		MessagesinQueue: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "messages_in_queue"),
			"Count messages in queue",
			[]string{"name"},
			nil,
		),
		queryWhereClause: wc.String(),
	}, nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Println("[ERROR] failed collecting msmq metrics:", desc, err)
		return err
	}
	return nil
}

type Win32_PerfRawData_MSMQ_MSMQQueue struct {
	Name string

	BytesinJournalQueue    uint64
	BytesinQueue           uint64
	MessagesinJournalQueue uint64
	MessagesinQueue        uint64
}

func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_MSMQ_MSMQQueue
	q := wmi.CreateQuery(&dst, c.queryWhereClause)
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}

	for _, msmq := range dst {
		if msmq.Name == "Computer Queues" {
			continue
		}

		ch <- prometheus.MustNewConstMetric(
			c.BytesinJournalQueue,
			prometheus.GaugeValue,
			float64(msmq.BytesinJournalQueue),
			strings.ToLower(msmq.Name),
		)
		ch <- prometheus.MustNewConstMetric(
			c.BytesinQueue,
			prometheus.GaugeValue,
			float64(msmq.BytesinQueue),
			strings.ToLower(msmq.Name),
		)
		ch <- prometheus.MustNewConstMetric(
			c.MessagesinJournalQueue,
			prometheus.GaugeValue,
			float64(msmq.MessagesinJournalQueue),
			strings.ToLower(msmq.Name),
		)
		ch <- prometheus.MustNewConstMetric(
			c.MessagesinQueue,
			prometheus.GaugeValue,
			float64(msmq.MessagesinQueue),
			strings.ToLower(msmq.Name),
		)
	}
	return nil, nil
}
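The msmq collector above registers itself through `Factories` and relies on the `Collector` interface and the `Namespace` constant that live elsewhere in the collector package. A minimal sketch of that plumbing, assuming the interface has exactly the `Collect` method used here; the `buildAll` helper is hypothetical and only illustrates how the registry could be iterated at startup:

```go
package collector

import "github.com/prometheus/client_golang/prometheus"

// Namespace is the common metric prefix; assumed here, the real value is defined elsewhere in the repo.
const Namespace = "wmi"

// Collector is the interface each WMI collector (msmq, tcp, ...) is assumed to satisfy.
type Collector interface {
	// Collect sends the metrics for one scrape to the provided channel.
	Collect(ch chan<- prometheus.Metric) error
}

// Factories maps a collector name to its constructor; each collector file
// registers itself in init(), e.g. Factories["msmq"] = NewMSMQCollector.
var Factories = make(map[string]func() (Collector, error))

// buildAll instantiates every registered collector (hypothetical helper).
func buildAll() (map[string]Collector, error) {
	collectors := make(map[string]Collector)
	for name, factory := range Factories {
		c, err := factory()
		if err != nil {
			return nil, err
		}
		collectors[name] = c
	}
	return collectors, nil
}
```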
@@ -39,6 +39,7 @@ type NetworkCollector struct {
	PacketsReceivedTotal   *prometheus.Desc
	PacketsReceivedUnknown *prometheus.Desc
	PacketsSentTotal       *prometheus.Desc
	CurrentBandwidth       *prometheus.Desc

	nicWhitelistPattern *regexp.Regexp
	nicBlacklistPattern *regexp.Regexp
@@ -115,6 +116,12 @@ func NewNetworkCollector() (Collector, error) {
			[]string{"nic"},
			nil,
		),
		CurrentBandwidth: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "current_bandwidth"),
			"(Network.CurrentBandwidth)",
			[]string{"nic"},
			nil,
		),

		nicWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicWhitelist)),
		nicBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicBlacklist)),
@@ -150,6 +157,7 @@ type Win32_PerfRawData_Tcpip_NetworkInterface struct {
	PacketsReceivedPerSec  uint64
	PacketsReceivedUnknown uint64
	PacketsSentPerSec      uint64
	CurrentBandwidth       uint64
}

func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
@@ -238,6 +246,12 @@ func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
			float64(nic.PacketsSentPerSec),
			name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.CurrentBandwidth,
			prometheus.CounterValue,
			float64(nic.CurrentBandwidth),
			name,
		)
	}

	return nil, nil
171 collector/tcp.go (Normal file)
@@ -0,0 +1,171 @@
// returns data points from Win32_PerfRawData_Tcpip_TCPv4

// https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx (Win32_PerfRawData_Tcpip_TCPv4 class)

package collector

import (
	"log"

	"github.com/StackExchange/wmi"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	Factories["tcp"] = NewTCPCollector
}

// A TCPCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_TCPv4 metrics
type TCPCollector struct {
	ConnectionFailures         *prometheus.Desc
	ConnectionsActive          *prometheus.Desc
	ConnectionsEstablished     *prometheus.Desc
	ConnectionsPassive         *prometheus.Desc
	ConnectionsReset           *prometheus.Desc
	SegmentsTotal              *prometheus.Desc
	SegmentsReceivedTotal      *prometheus.Desc
	SegmentsRetransmittedTotal *prometheus.Desc
	SegmentsSentTotal          *prometheus.Desc
}

// NewTCPCollector ...
func NewTCPCollector() (Collector, error) {
	const subsystem = "tcp"

	return &TCPCollector{
		ConnectionFailures: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connection_failures"),
			"(TCP.ConnectionFailures)",
			nil,
			nil,
		),
		ConnectionsActive: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_active"),
			"(TCP.ConnectionsActive)",
			nil,
			nil,
		),
		ConnectionsEstablished: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_established"),
			"(TCP.ConnectionsEstablished)",
			nil,
			nil,
		),
		ConnectionsPassive: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_passive"),
			"(TCP.ConnectionsPassive)",
			nil,
			nil,
		),
		ConnectionsReset: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_reset"),
			"(TCP.ConnectionsReset)",
			nil,
			nil,
		),
		SegmentsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_total"),
			"(TCP.SegmentsTotal)",
			nil,
			nil,
		),
		SegmentsReceivedTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_received_total"),
			"(TCP.SegmentsReceivedTotal)",
			nil,
			nil,
		),
		SegmentsRetransmittedTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_retransmitted_total"),
			"(TCP.SegmentsRetransmittedTotal)",
			nil,
			nil,
		),
		SegmentsSentTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_sent_total"),
			"(TCP.SegmentsSentTotal)",
			nil,
			nil,
		),
	}, nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Println("[ERROR] failed collecting tcp metrics:", desc, err)
		return err
	}
	return nil
}

type Win32_PerfRawData_Tcpip_TCPv4 struct {
	ConnectionFailures          uint64
	ConnectionsActive           uint64
	ConnectionsEstablished      uint64
	ConnectionsPassive          uint64
	ConnectionsReset            uint64
	SegmentsPersec              uint64
	SegmentsReceivedPersec      uint64
	SegmentsRetransmittedPersec uint64
	SegmentsSentPersec          uint64
}

func (c *TCPCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_Tcpip_TCPv4

	q := wmi.CreateQuery(&dst, "")
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}

	// Counters
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionFailures,
		prometheus.CounterValue,
		float64(dst[0].ConnectionFailures),
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsActive,
		prometheus.CounterValue,
		float64(dst[0].ConnectionsActive),
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsEstablished,
		prometheus.CounterValue,
		float64(dst[0].ConnectionsEstablished),
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsPassive,
		prometheus.CounterValue,
		float64(dst[0].ConnectionsPassive),
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsReset,
		prometheus.CounterValue,
		float64(dst[0].ConnectionsReset),
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsTotal,
		prometheus.CounterValue,
		float64(dst[0].SegmentsPersec),
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsReceivedTotal,
		prometheus.CounterValue,
		float64(dst[0].SegmentsReceivedPersec),
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsRetransmittedTotal,
		prometheus.CounterValue,
		float64(dst[0].SegmentsRetransmittedPersec),
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsSentTotal,
		prometheus.CounterValue,
		float64(dst[0].SegmentsSentPersec),
	)

	return nil, nil
}
66 vendor/github.com/Sirupsen/logrus/CHANGELOG.md (generated, vendored)
@@ -1,66 +0,0 @@
# 0.10.0

* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)

# 0.9.0

* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support

# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements

# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

* logrus: fix more Fatal family functions

# 0.8.1

* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

* formatter/\*: allow configuration of timestamp layout

# 0.7.2

* formatter/text: Add configuration option for time format (#158)
421 vendor/github.com/Sirupsen/logrus/README.md (generated, vendored)
@@ -1,421 +0,0 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/Sirupsen/logrus) [](https://godoc.org/github.com/Sirupsen/logrus)

Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**

Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):

![Colored](http://i.imgur.com/PY7qMwd.png)

With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:

```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}

{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}

{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```

With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:

```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```

#### Example

The simplest way to use Logrus is simply the package-level exported logger:

```go
package main

import (
  log "github.com/Sirupsen/logrus"
)

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
  }).Info("A walrus appears")
}
```

Note that it's completely api-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:

```go
package main

import (
  "os"
  log "github.com/Sirupsen/logrus"
)

func init() {
  // Log as JSON instead of the default ASCII formatter.
  log.SetFormatter(&log.JSONFormatter{})

  // Output to stderr instead of stdout, could also be a file.
  log.SetOutput(os.Stderr)

  // Only log the warning severity or above.
  log.SetLevel(log.WarnLevel)
}

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
    "size":   10,
  }).Info("A group of walrus emerges from the ocean")

  log.WithFields(log.Fields{
    "omg":    true,
    "number": 122,
  }).Warn("The group's number increased tremendously!")

  log.WithFields(log.Fields{
    "omg":    true,
    "number": 100,
  }).Fatal("The ice breaks!")

  // A common pattern is to re-use fields between logging statements by re-using
  // the logrus.Entry returned from WithFields()
  contextLogger := log.WithFields(log.Fields{
    "common": "this is a common field",
    "other":  "I also should be logged always",
  })

  contextLogger.Info("I'll be logged with common and other field")
  contextLogger.Info("Me too")
}
```

For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:

```go
package main

import (
  "github.com/Sirupsen/logrus"
)

// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()

func main() {
  // The API for setting attributes is a little different than the package level
  // exported logger. See Godoc.
  log.Out = os.Stderr

  log.WithFields(logrus.Fields{
    "animal": "walrus",
    "size":   10,
  }).Info("A group of walrus emerges from the ocean")
}
```

#### Fields

Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:

```go
log.WithFields(log.Fields{
  "event": event,
  "topic": topic,
  "key":   key,
}).Fatal("Failed to send event")
```

We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.

In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field; however, you can still use the
`printf`-family functions with Logrus.

#### Hooks

You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.

Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:

```go
import (
  log "github.com/Sirupsen/logrus"
  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
  "log/syslog"
)

func init() {

  // Use the Airbrake hook to report errors that have Error severity or above to
  // an exception tracker. You can create custom hooks, see the Hooks section.
  log.AddHook(airbrake.NewHook(123, "xyz", "production"))

  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
  if err != nil {
    log.Error("Unable to connect to local syslog daemon")
  } else {
    log.AddHook(hook)
  }
}
```

Note: the Syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).

| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |

#### Level logging

Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.

```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```

You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:

```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```

It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.

#### Entries

Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:

1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
   the `AddFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.

#### Environments

Logrus has no notion of environment.

If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment, you
could do:

```go
import (
  log "github.com/Sirupsen/logrus"
)

func init() {
  // do something here to set environment depending on an environment variable
  // or command-line flag
  if Environment == "production" {
    log.SetFormatter(&log.JSONFormatter{})
  } else {
    // The TextFormatter is default, you don't actually have to do this.
    log.SetFormatter(&log.TextFormatter{})
  }
}
```

This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.

#### Formatters

The built-in logging formatters are:

* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
  without colors.
  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
    field to `true`. To force no colored output even if there is a TTY set the
    `DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.

Third party logging formatters:

* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.

You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):

```go
type MyJSONFormatter struct {
}

log.SetFormatter(new(MyJSONFormatter))

func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
  // Note this doesn't include Time, Level and Message which are available on
  // the Entry. Consult `godoc` on information about those fields or read the
  // source of the official loggers.
  serialized, err := json.Marshal(entry.Data)
  if err != nil {
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
  }
  return append(serialized, '\n'), nil
}
```

#### Logger as an `io.Writer`

Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.

```go
w := logger.Writer()
defer w.Close()

srv := http.Server{
    // create a stdlib log.Logger that writes to
    // logrus.Logger.
    ErrorLog: log.New(w, "", 0),
}
```

Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.

#### Rotation

Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.

#### Tools

| Tool | Description |
| ---- | ----------- |
| [Logrus Mate](https://github.com/gogap/logrus_mate) | Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with a different config in each environment. |

#### Testing

Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:

* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):

```go
logger, hook := NewNullLogger()
logger.Error("Hello error")

assert.Equal(1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)

hook.Reset()
assert.Nil(hook.LastEntry())
```

#### Fatal handlers

Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.

```
...
handler := func() {
  // gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```

#### Thread safety

By default the Logger is protected by a mutex for concurrent writes; this mutex is held while calling hooks and writing the log.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.

Situations when locking is not needed include:

* You have no hooks registered, or calling the hooks is already thread-safe.

* Writing to logger.Out is already thread-safe, for example:

  1) logger.Out is protected by locks.

  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)

(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
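The SetNoLock escape hatch mentioned above is easy to misuse; a minimal sketch of the intended situation, assuming a logger with no hooks and a single, already-serialized output (the identifiers `New`, `Out` and `SetNoLock` are the ones that appear in the logger.go diff further down):

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	// Single destination, no hooks registered: per the note above, this is a
	// case where the internal mutex can be switched off.
	logger.Out = os.Stderr
	logger.SetNoLock()
	logger.Info("logging without the internal mutex")
}
```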
2 vendor/github.com/Sirupsen/logrus/alt_exit.go (generated, vendored)
@@ -1,7 +1,7 @@
package logrus

// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//

4 vendor/github.com/Sirupsen/logrus/doc.go (generated, vendored)
@@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
  package main

  import (
    log "github.com/Sirupsen/logrus"
    log "github.com/sirupsen/logrus"
  )

  func main() {
@@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger:
  Output:
   time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/Sirupsen/logrus
For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus

85 vendor/github.com/Sirupsen/logrus/entry.go (generated, vendored)
@@ -35,6 +35,7 @@ type Entry struct {
	Time time.Time

	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
	Level Level

	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
@@ -93,29 +94,16 @@ func (entry Entry) log(level Level, msg string) {
	entry.Level = level
	entry.Message = msg

	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
		entry.Logger.mu.Unlock()
	}
	entry.fireHooks()

	buffer = bufferPool.Get().(*bytes.Buffer)
	buffer.Reset()
	defer bufferPool.Put(buffer)
	entry.Buffer = buffer
	serialized, err := entry.Logger.Formatter.Format(&entry)

	entry.write()

	entry.Buffer = nil
	if err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
		entry.Logger.mu.Unlock()
	} else {
		entry.Logger.mu.Lock()
		_, err = entry.Logger.Out.Write(serialized)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
		}
		entry.Logger.mu.Unlock()
	}

	// To avoid Entry#log() returning a value that only would make sense for
	// panic() to use in Entry#Panic(), we avoid the allocation by checking
@@ -125,8 +113,33 @@ func (entry Entry) log(level Level, msg string) {
	}
}

// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) fireHooks() {
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	err := entry.Logger.Hooks.Fire(entry.Level, &entry)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
	}
}

func (entry *Entry) write() {
	serialized, err := entry.Logger.Formatter.Format(entry)
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
	} else {
		_, err = entry.Logger.Out.Write(serialized)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
		}
	}
}

func (entry *Entry) Debug(args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
	if entry.Logger.level() >= DebugLevel {
		entry.log(DebugLevel, fmt.Sprint(args...))
	}
}
@@ -136,13 +149,13 @@ func (entry *Entry) Print(args ...interface{}) {
}

func (entry *Entry) Info(args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
	if entry.Logger.level() >= InfoLevel {
		entry.log(InfoLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Warn(args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
	if entry.Logger.level() >= WarnLevel {
		entry.log(WarnLevel, fmt.Sprint(args...))
	}
}
@@ -152,20 +165,20 @@ func (entry *Entry) Warning(args ...interface{}) {
}

func (entry *Entry) Error(args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
	if entry.Logger.level() >= ErrorLevel {
		entry.log(ErrorLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Fatal(args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
	if entry.Logger.level() >= FatalLevel {
		entry.log(FatalLevel, fmt.Sprint(args...))
	}
	Exit(1)
}

func (entry *Entry) Panic(args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
	if entry.Logger.level() >= PanicLevel {
		entry.log(PanicLevel, fmt.Sprint(args...))
	}
	panic(fmt.Sprint(args...))
@@ -174,13 +187,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions

func (entry *Entry) Debugf(format string, args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
	if entry.Logger.level() >= DebugLevel {
		entry.Debug(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Infof(format string, args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
	if entry.Logger.level() >= InfoLevel {
		entry.Info(fmt.Sprintf(format, args...))
	}
}
@@ -190,7 +203,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}

func (entry *Entry) Warnf(format string, args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
	if entry.Logger.level() >= WarnLevel {
		entry.Warn(fmt.Sprintf(format, args...))
	}
}
@@ -200,20 +213,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}

func (entry *Entry) Errorf(format string, args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
	if entry.Logger.level() >= ErrorLevel {
		entry.Error(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Fatalf(format string, args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
	if entry.Logger.level() >= FatalLevel {
		entry.Fatal(fmt.Sprintf(format, args...))
	}
	Exit(1)
}

func (entry *Entry) Panicf(format string, args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
	if entry.Logger.level() >= PanicLevel {
		entry.Panic(fmt.Sprintf(format, args...))
	}
}
@@ -221,13 +234,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions

func (entry *Entry) Debugln(args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
	if entry.Logger.level() >= DebugLevel {
		entry.Debug(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Infoln(args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
	if entry.Logger.level() >= InfoLevel {
		entry.Info(entry.sprintlnn(args...))
	}
}
@@ -237,7 +250,7 @@ func (entry *Entry) Println(args ...interface{}) {
}

func (entry *Entry) Warnln(args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
	if entry.Logger.level() >= WarnLevel {
		entry.Warn(entry.sprintlnn(args...))
	}
}
@@ -247,20 +260,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
}

func (entry *Entry) Errorln(args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
	if entry.Logger.level() >= ErrorLevel {
		entry.Error(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Fatalln(args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
	if entry.Logger.level() >= FatalLevel {
		entry.Fatal(entry.sprintlnn(args...))
	}
	Exit(1)
}

func (entry *Entry) Panicln(args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
	if entry.Logger.level() >= PanicLevel {
		entry.Panic(entry.sprintlnn(args...))
	}
}

4 vendor/github.com/Sirupsen/logrus/exported.go (generated, vendored)
@@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) {
func SetLevel(level Level) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Level = level
	std.SetLevel(level)
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	std.mu.Lock()
	defer std.mu.Unlock()
	return std.Level
	return std.level()
}

// AddHook adds a hook to the standard logger hooks.

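The change above routes the package-level `SetLevel`/`GetLevel` through the logger's atomic `level()` accessor, so the level can be flipped while other goroutines are logging. A small sketch of the kind of concurrent use this protects, assuming the renamed `github.com/sirupsen/logrus` import path pinned in Gopkg.lock:

```go
package main

import (
	"sync"

	log "github.com/sirupsen/logrus"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(2)
	// One goroutine changes the level while another logs; the atomic
	// load/store behind SetLevel and the level checks keeps this race-free.
	go func() {
		defer wg.Done()
		log.SetLevel(log.DebugLevel)
	}()
	go func() {
		defer wg.Done()
		log.Debug("may or may not appear, depending on timing")
	}()
	wg.Wait()
}
```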
2 vendor/github.com/Sirupsen/logrus/formatter.go (generated, vendored)
@@ -2,7 +2,7 @@ package logrus

import "time"

const DefaultTimestampFormat = time.RFC3339
const defaultTimestampFormat = time.RFC3339

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:

48 vendor/github.com/Sirupsen/logrus/json_formatter.go (generated, vendored)
@@ -5,18 +5,54 @@ import (
	"fmt"
)

type fieldKey string

// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string

// Default key names for the default fields
const (
	FieldKeyMsg   = "msg"
	FieldKeyLevel = "level"
	FieldKeyTime  = "time"
)

func (f FieldMap) resolve(key fieldKey) string {
	if k, ok := f[key]; ok {
		return k
	}

	return string(key)
}

// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string

	// DisableTimestamp allows disabling automatic timestamps in output
	DisableTimestamp bool

	// FieldMap allows users to customize the names of keys for default fields.
	// As an example:
	// formatter := &JSONFormatter{
	//   FieldMap: FieldMap{
	//     FieldKeyTime: "@timestamp",
	//     FieldKeyLevel: "@level",
	//     FieldKeyMsg: "@message",
	//   },
	// }
	FieldMap FieldMap
}

// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+3)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/Sirupsen/logrus/issues/137
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
@@ -26,12 +62,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = DefaultTimestampFormat
		timestampFormat = defaultTimestampFormat
	}

	data["time"] = entry.Time.Format(timestampFormat)
	data["msg"] = entry.Message
	data["level"] = entry.Level.String()
	if !f.DisableTimestamp {
		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
	}
	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()

	serialized, err := json.Marshal(data)
	if err != nil {

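The `FieldMap` support added above lets callers rename the default `time`/`msg`/`level` keys. A minimal usage sketch, assuming the logrus version pinned in Gopkg.lock (v1.0.5); `JSONFormatter`, `FieldMap` and the `FieldKey*` constants are exactly the identifiers introduced in this diff:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Emit Logstash-style key names instead of the defaults.
	log.SetFormatter(&log.JSONFormatter{
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
	})
	log.WithField("animal", "walrus").Info("A walrus appears")
}
```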
59 vendor/github.com/Sirupsen/logrus/logger.go (generated, vendored)
@@ -4,6 +4,7 @@ import (
	"io"
	"os"
	"sync"
	"sync/atomic"
)

type Logger struct {
@@ -24,7 +25,7 @@ type Logger struct {
	Formatter Formatter
	// The logging level the logger should log at. This is typically (and defaults
	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
	// logged. `logrus.Debug` is useful in
	// logged.
	Level Level
	// Used to sync writing to the log. Locking is enabled by Default
	mu MutexWrap
@@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry {
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
	if logger.Level >= DebugLevel {
	if logger.level() >= DebugLevel {
		entry := logger.newEntry()
		entry.Debugf(format, args...)
		logger.releaseEntry(entry)
@@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
}

func (logger *Logger) Infof(format string, args ...interface{}) {
	if logger.Level >= InfoLevel {
	if logger.level() >= InfoLevel {
		entry := logger.newEntry()
		entry.Infof(format, args...)
		logger.releaseEntry(entry)
@@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
	if logger.Level >= WarnLevel {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnf(format, args...)
		logger.releaseEntry(entry)
@@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
	if logger.Level >= WarnLevel {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnf(format, args...)
		logger.releaseEntry(entry)
@@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
	if logger.Level >= ErrorLevel {
	if logger.level() >= ErrorLevel {
		entry := logger.newEntry()
		entry.Errorf(format, args...)
		logger.releaseEntry(entry)
@@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
	if logger.Level >= FatalLevel {
	if logger.level() >= FatalLevel {
		entry := logger.newEntry()
		entry.Fatalf(format, args...)
		logger.releaseEntry(entry)
@@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
	if logger.Level >= PanicLevel {
	if logger.level() >= PanicLevel {
		entry := logger.newEntry()
		entry.Panicf(format, args...)
		logger.releaseEntry(entry)
@@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
}

func (logger *Logger) Debug(args ...interface{}) {
	if logger.Level >= DebugLevel {
	if logger.level() >= DebugLevel {
		entry := logger.newEntry()
		entry.Debug(args...)
		logger.releaseEntry(entry)
@@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) {
}

func (logger *Logger) Info(args ...interface{}) {
	if logger.Level >= InfoLevel {
	if logger.level() >= InfoLevel {
		entry := logger.newEntry()
		entry.Info(args...)
		logger.releaseEntry(entry)
@@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) {
}

func (logger *Logger) Warn(args ...interface{}) {
	if logger.Level >= WarnLevel {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warn(args...)
		logger.releaseEntry(entry)
@@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) {
}

func (logger *Logger) Warning(args ...interface{}) {
	if logger.Level >= WarnLevel {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warn(args...)
		logger.releaseEntry(entry)
@@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) {
}

func (logger *Logger) Error(args ...interface{}) {
	if logger.Level >= ErrorLevel {
	if logger.level() >= ErrorLevel {
		entry := logger.newEntry()
		entry.Error(args...)
		logger.releaseEntry(entry)
@@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) {
}

func (logger *Logger) Fatal(args ...interface{}) {
	if logger.Level >= FatalLevel {
	if logger.level() >= FatalLevel {
		entry := logger.newEntry()
		entry.Fatal(args...)
		logger.releaseEntry(entry)
@@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
}

func (logger *Logger) Panic(args ...interface{}) {
	if logger.Level >= PanicLevel {
	if logger.level() >= PanicLevel {
		entry := logger.newEntry()
		entry.Panic(args...)
		logger.releaseEntry(entry)
@@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) {
}

func (logger *Logger) Debugln(args ...interface{}) {
	if logger.Level >= DebugLevel {
	if logger.level() >= DebugLevel {
		entry := logger.newEntry()
		entry.Debugln(args...)
		logger.releaseEntry(entry)
@@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
}

func (logger *Logger) Infoln(args ...interface{}) {
	if logger.Level >= InfoLevel {
	if logger.level() >= InfoLevel {
		entry := logger.newEntry()
		entry.Infoln(args...)
		logger.releaseEntry(entry)
@@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) {
}

func (logger *Logger) Warnln(args ...interface{}) {
	if logger.Level >= WarnLevel {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnln(args...)
		logger.releaseEntry(entry)
@@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
}

func (logger *Logger) Warningln(args ...interface{}) {
	if logger.Level >= WarnLevel {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnln(args...)
		logger.releaseEntry(entry)
@@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
}

func (logger *Logger) Errorln(args ...interface{}) {
	if logger.Level >= ErrorLevel {
	if logger.level() >= ErrorLevel {
		entry := logger.newEntry()
		entry.Errorln(args...)
		logger.releaseEntry(entry)
@@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
}

func (logger *Logger) Fatalln(args ...interface{}) {
	if logger.Level >= FatalLevel {
	if logger.level() >= FatalLevel {
		entry := logger.newEntry()
		entry.Fatalln(args...)
		logger.releaseEntry(entry)
@@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
}

func (logger *Logger) Panicln(args ...interface{}) {
	if logger.Level >= PanicLevel {
	if logger.level() >= PanicLevel {
		entry := logger.newEntry()
		entry.Panicln(args...)
		logger.releaseEntry(entry)
@@ -306,3 +307,17 @@ func (logger *Logger) Panicln(args ...interface{}) {
func (logger *Logger) SetNoLock() {
	logger.mu.Disable()
}

func (logger *Logger) level() Level {
	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
|
||||
}
|
||||
|
||||
func (logger *Logger) SetLevel(level Level) {
|
||||
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
|
||||
}
|
||||
|
||||
func (logger *Logger) AddHook(hook Hook) {
|
||||
logger.mu.Lock()
|
||||
defer logger.mu.Unlock()
|
||||
logger.Hooks.Add(hook)
|
||||
}
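The hunks above replace direct reads of logger.Level with logger.level(), which loads the level atomically, and SetLevel stores it with atomic.StoreUint32; that is also why Level is widened from uint8 to uint32 in the next file, since sync/atomic has no 8-bit load. A minimal standalone sketch of the same pattern, using only the standard library (the type and method names below are illustrative, not the logrus API):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type Level uint32 // must be 32 bits wide so sync/atomic can operate on it

    const (
        ErrorLevel Level = iota
        WarnLevel
        InfoLevel
        DebugLevel
    )

    type Logger struct {
        level uint32 // accessed only through atomic loads and stores
    }

    func (l *Logger) SetLevel(lv Level) { atomic.StoreUint32(&l.level, uint32(lv)) }
    func (l *Logger) Level() Level      { return Level(atomic.LoadUint32(&l.level)) }

    // Infof emits only when the current level permits it, without taking a lock.
    func (l *Logger) Infof(format string, args ...interface{}) {
        if l.Level() >= InfoLevel {
            fmt.Printf("info: "+format+"\n", args...)
        }
    }

    func main() {
        l := &Logger{}
        l.SetLevel(InfoLevel)
        l.Infof("level is now %d", l.Level())
    }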
2 changes: vendor/github.com/Sirupsen/logrus/logrus.go (generated, vendored)
@@ -10,7 +10,7 @@ import (
|
||||
type Fields map[string]interface{}
|
||||
|
||||
// Level type
|
||||
type Level uint8
|
||||
type Level uint32
|
||||
|
||||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||
func (level Level) String() string {
|
||||
|
||||
8 changes: vendor/github.com/Sirupsen/logrus/terminal_appengine.go (generated, vendored)
@@ -1,8 +0,0 @@
-// +build appengine
-
-package logrus
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
-	return true
-}
8 changes: vendor/github.com/Sirupsen/logrus/terminal_bsd.go (generated, vendored)
@@ -1,10 +1,10 @@
 // +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
+// +build !appengine,!gopherjs

 package logrus

-import "syscall"
+import "golang.org/x/sys/unix"

-const ioctlReadTermios = syscall.TIOCGETA
+const ioctlReadTermios = unix.TIOCGETA

-type Termios syscall.Termios
+type Termios unix.Termios
8 changes: vendor/github.com/Sirupsen/logrus/terminal_linux.go (generated, vendored)
@@ -3,12 +3,12 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-// +build !appengine
+// +build !appengine,!gopherjs

 package logrus

-import "syscall"
+import "golang.org/x/sys/unix"

-const ioctlReadTermios = syscall.TCGETS
+const ioctlReadTermios = unix.TCGETS

-type Termios syscall.Termios
+type Termios unix.Termios
||||
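Both the BSD and Linux variants swap syscall for golang.org/x/sys/unix, leaving only the ioctl request constant platform-specific (TIOCGETA vs TCGETS). A hedged sketch of the same terminal check using the unix.IoctlGetTermios convenience wrapper rather than the raw ioctl issued by the vendored code:

    //go:build linux

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    // isTerminal reports whether fd refers to a terminal by asking the kernel
    // for its termios settings; the ioctl fails for pipes and regular files.
    func isTerminal(fd int) bool {
        _, err := unix.IoctlGetTermios(fd, unix.TCGETS)
        return err == nil
    }

    func main() {
        fmt.Println("stderr is a terminal:", isTerminal(int(os.Stderr.Fd())))
    }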
22 changes: vendor/github.com/Sirupsen/logrus/terminal_notwindows.go (generated, vendored)
@@ -1,22 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
-	fd := syscall.Stderr
-	var termios Termios
-	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
-	return err == 0
-}
15 changes: vendor/github.com/Sirupsen/logrus/terminal_solaris.go (generated, vendored)
@@ -1,15 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal() bool {
-	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
-	return err == nil
-}
27 changes: vendor/github.com/Sirupsen/logrus/terminal_windows.go (generated, vendored)
@@ -1,27 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
-	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
-	fd := syscall.Stderr
-	var st uint32
-	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
-	return r != 0 && e == 0
-}
||||
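The deleted Windows variant resolves GetConsoleMode out of kernel32.dll by hand. A sketch of the same check through golang.org/x/sys/windows, which wraps that API; this is an alternative formulation, not the vendored code:

    //go:build windows

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/windows"
    )

    // isTerminal reports whether the handle is attached to a console:
    // GetConsoleMode succeeds only for console handles, not for pipes or files.
    func isTerminal(fd uintptr) bool {
        var mode uint32
        err := windows.GetConsoleMode(windows.Handle(fd), &mode)
        return err == nil
    }

    func main() {
        fmt.Println("stderr is a console:", isTerminal(os.Stderr.Fd()))
    }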
83 changes: vendor/github.com/Sirupsen/logrus/text_formatter.go (generated, vendored)
@@ -3,9 +3,9 @@ package logrus
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -14,24 +14,19 @@ const (
|
||||
red = 31
|
||||
green = 32
|
||||
yellow = 33
|
||||
blue = 34
|
||||
blue = 36
|
||||
gray = 37
|
||||
)
|
||||
|
||||
var (
|
||||
baseTimestamp time.Time
|
||||
isTerminal bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
baseTimestamp = time.Now()
|
||||
isTerminal = IsTerminal()
|
||||
}
|
||||
|
||||
func miniTS() int {
|
||||
return int(time.Since(baseTimestamp) / time.Second)
|
||||
}
|
||||
|
||||
// TextFormatter formats logs into text
|
||||
type TextFormatter struct {
|
||||
// Set to true to bypass checking for a TTY before outputting colors.
|
||||
ForceColors bool
|
||||
@@ -54,11 +49,26 @@ type TextFormatter struct {
|
||||
// that log extremely frequently and don't use the JSON formatter this may not
|
||||
// be desired.
|
||||
DisableSorting bool
|
||||
|
||||
// QuoteEmptyFields will wrap empty fields in quotes if true
|
||||
QuoteEmptyFields bool
|
||||
|
||||
// Whether the logger's out is to a terminal
|
||||
isTerminal bool
|
||||
|
||||
sync.Once
|
||||
}
|
||||
|
||||
func (f *TextFormatter) init(entry *Entry) {
|
||||
if entry.Logger != nil {
|
||||
f.isTerminal = checkIfTerminal(entry.Logger.Out)
|
||||
}
|
||||
}
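The formatter no longer depends on a package-level isTerminal computed in init(); it carries its own flag plus a sync.Once and inspects the entry's output writer the first time Format runs. A minimal sketch of that lazy, once-only initialization pattern (names are illustrative and the terminal test is deliberately simplified):

    package main

    import (
        "fmt"
        "io"
        "os"
        "sync"
    )

    type formatter struct {
        isTerminal bool
        once       sync.Once
    }

    // format decides on the first call whether out looks like a terminal
    // and reuses that answer for every later call.
    func (f *formatter) format(out io.Writer, msg string) string {
        f.once.Do(func() {
            if file, ok := out.(*os.File); ok {
                // A real check would inspect the fd; treating os.Stdout and
                // os.Stderr as terminals keeps this sketch self-contained.
                f.isTerminal = file == os.Stdout || file == os.Stderr
            }
        })
        if f.isTerminal {
            return "\x1b[36m" + msg + "\x1b[0m"
        }
        return msg
    }

    func main() {
        f := &formatter{}
        fmt.Println(f.format(os.Stdout, "colored when on a terminal"))
    }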
|
||||
|
||||
// Format renders a single log entry
|
||||
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
var b *bytes.Buffer
|
||||
var keys []string = make([]string, 0, len(entry.Data))
|
||||
keys := make([]string, 0, len(entry.Data))
|
||||
for k := range entry.Data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
@@ -74,12 +84,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
|
||||
prefixFieldClashes(entry.Data)
|
||||
|
||||
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
|
||||
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
|
||||
f.Do(func() { f.init(entry) })
|
||||
|
||||
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
|
||||
|
||||
timestampFormat := f.TimestampFormat
|
||||
if timestampFormat == "" {
|
||||
timestampFormat = DefaultTimestampFormat
|
||||
timestampFormat = defaultTimestampFormat
|
||||
}
|
||||
if isColored {
|
||||
f.printColored(b, entry, keys, timestampFormat)
|
||||
@@ -115,23 +126,29 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
|
||||
|
||||
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
||||
|
||||
if !f.FullTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
|
||||
if f.DisableTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
|
||||
} else if !f.FullTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
|
||||
} else {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
||||
}
|
||||
for _, k := range keys {
|
||||
v := entry.Data[k]
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
|
||||
f.appendValue(b, v)
|
||||
}
|
||||
}
|
||||
|
||||
func needsQuoting(text string) bool {
|
||||
func (f *TextFormatter) needsQuoting(text string) bool {
|
||||
if f.QuoteEmptyFields && len(text) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, ch := range text {
|
||||
if !((ch >= 'a' && ch <= 'z') ||
|
||||
(ch >= 'A' && ch <= 'Z') ||
|
||||
(ch >= '0' && ch <= '9') ||
|
||||
ch == '-' || ch == '.') {
|
||||
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -139,27 +156,23 @@ func needsQuoting(text string) bool {
|
||||
}
|
||||
|
||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
|
||||
|
||||
if b.Len() > 0 {
|
||||
b.WriteByte(' ')
|
||||
}
|
||||
b.WriteString(key)
|
||||
b.WriteByte('=')
|
||||
f.appendValue(b, value)
|
||||
}
|
||||
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
if !needsQuoting(value) {
|
||||
b.WriteString(value)
|
||||
} else {
|
||||
fmt.Fprintf(b, "%q", value)
|
||||
}
|
||||
case error:
|
||||
errmsg := value.Error()
|
||||
if !needsQuoting(errmsg) {
|
||||
b.WriteString(errmsg)
|
||||
} else {
|
||||
fmt.Fprintf(b, "%q", value)
|
||||
}
|
||||
default:
|
||||
fmt.Fprint(b, value)
|
||||
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
|
||||
stringVal, ok := value.(string)
|
||||
if !ok {
|
||||
stringVal = fmt.Sprint(value)
|
||||
}
|
||||
|
||||
b.WriteByte(' ')
|
||||
if !f.needsQuoting(stringVal) {
|
||||
b.WriteString(stringVal)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%q", stringVal))
|
||||
}
|
||||
}
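After this refactor every field value funnels through appendValue: non-strings are stringified with fmt.Sprint, and needsQuoting decides whether to quote the result, now optionally quoting empty strings (QuoteEmptyFields) and admitting a few more unquoted characters such as _, /, @, ^ and +. A standalone sketch of that quoting rule, not the logrus implementation itself:

    package main

    import (
        "bytes"
        "fmt"
    )

    // needsQuoting mirrors the rule above: quote empty values when asked to,
    // and quote anything containing a character outside the allowed set.
    func needsQuoting(text string, quoteEmpty bool) bool {
        if quoteEmpty && len(text) == 0 {
            return true
        }
        for _, ch := range text {
            if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') ||
                (ch >= '0' && ch <= '9') ||
                ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
                return true
            }
        }
        return false
    }

    // appendValue stringifies non-strings first, then applies the quoting rule.
    func appendValue(b *bytes.Buffer, value interface{}) {
        s, ok := value.(string)
        if !ok {
            s = fmt.Sprint(value)
        }
        if needsQuoting(s, false) {
            fmt.Fprintf(b, "%q", s)
        } else {
            b.WriteString(s)
        }
    }

    func main() {
        var b bytes.Buffer
        b.WriteString("path=")
        appendValue(&b, "/var/log/app")
        b.WriteString(" msg=")
        appendValue(&b, "hello world")
        fmt.Println(b.String()) // path=/var/log/app msg="hello world"
    }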
29 changes: vendor/github.com/Sirupsen/logrus/writer.go (generated, vendored)
@@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
|
||||
}
|
||||
|
||||
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
|
||||
return NewEntry(logger).WriterLevel(level)
|
||||
}
|
||||
|
||||
func (entry *Entry) Writer() *io.PipeWriter {
|
||||
return entry.WriterLevel(InfoLevel)
|
||||
}
|
||||
|
||||
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
|
||||
reader, writer := io.Pipe()
|
||||
|
||||
var printFunc func(args ...interface{})
|
||||
|
||||
switch level {
|
||||
case DebugLevel:
|
||||
printFunc = logger.Debug
|
||||
printFunc = entry.Debug
|
||||
case InfoLevel:
|
||||
printFunc = logger.Info
|
||||
printFunc = entry.Info
|
||||
case WarnLevel:
|
||||
printFunc = logger.Warn
|
||||
printFunc = entry.Warn
|
||||
case ErrorLevel:
|
||||
printFunc = logger.Error
|
||||
printFunc = entry.Error
|
||||
case FatalLevel:
|
||||
printFunc = logger.Fatal
|
||||
printFunc = entry.Fatal
|
||||
case PanicLevel:
|
||||
printFunc = logger.Panic
|
||||
printFunc = entry.Panic
|
||||
default:
|
||||
printFunc = logger.Print
|
||||
printFunc = entry.Print
|
||||
}
|
||||
|
||||
go logger.writerScanner(reader, printFunc)
|
||||
go entry.writerScanner(reader, printFunc)
|
||||
runtime.SetFinalizer(writer, writerFinalizer)
|
||||
|
||||
return writer
|
||||
}
|
||||
|
||||
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
|
||||
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
printFunc(scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
logger.Errorf("Error while reading from Writer: %s", err)
|
||||
entry.Errorf("Error while reading from Writer: %s", err)
|
||||
}
|
||||
reader.Close()
|
||||
}
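Because WriterLevel and the scanner goroutine now hang off an *Entry, the returned pipe writer can carry fields as well as a level. A hedged usage sketch against the exported logrus API as vendored here (WithField and WriterLevel are the real exported names; the wiring around them is illustrative):

    package main

    import (
        "log"

        "github.com/Sirupsen/logrus"
    )

    func main() {
        logger := logrus.New()

        // Everything written to w is scanned line by line in a background
        // goroutine and re-emitted at Error level with the attached field.
        w := logger.WithField("component", "http").WriterLevel(logrus.ErrorLevel)
        defer w.Close()

        // A stdlib *log.Logger (for example an http.Server ErrorLog) can now
        // feed straight into logrus.
        stdlog := log.New(w, "", 0)
        stdlog.Println("TLS handshake error from 10.0.0.1")
    }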
6 changes: vendor/github.com/StackExchange/wmi/README.md (generated, vendored)
@@ -1,6 +0,0 @@
-wmi
-===
-
-Package wmi provides a WQL interface to Windows WMI.
-
-Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.
62 changes: vendor/github.com/StackExchange/wmi/wmi.go (generated, vendored)
@@ -370,32 +370,50 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat
|
||||
}
|
||||
}
|
||||
default:
|
||||
// Only support []string slices for now
|
||||
if f.Kind() == reflect.Slice && f.Type().Elem().Kind() == reflect.String {
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetString(v.(string))
|
||||
if f.Kind() == reflect.Slice {
|
||||
switch f.Type().Elem().Kind() {
|
||||
case reflect.String:
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetString(v.(string))
|
||||
}
|
||||
f.Set(fArr)
|
||||
}
|
||||
case reflect.Uint8:
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetUint(reflect.ValueOf(v).Uint())
|
||||
}
|
||||
f.Set(fArr)
|
||||
}
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: fmt.Sprintf("unsupported slice type (%T)", val),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
typeof := reflect.TypeOf(val)
|
||||
if typeof == nil && (isPtr || c.NonePtrZero) {
|
||||
if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
|
||||
of.Set(reflect.Zero(of.Type()))
|
||||
}
|
||||
f.Set(fArr)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
typeof := reflect.TypeOf(val)
|
||||
if typeof == nil && (isPtr || c.NonePtrZero) {
|
||||
if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
|
||||
of.Set(reflect.Zero(of.Type()))
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: fmt.Sprintf("unsupported type (%T)", val),
|
||||
}
|
||||
break
|
||||
}
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: fmt.Sprintf("unsupported type (%T)", val),
|
||||
}
|
||||
}
|
||||
}
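The widened default branch means a destination field may now be a []string or a []uint8 and still be filled from a WMI SAFEARRAY. A hedged usage sketch against the package's exported CreateQuery and Query helpers; Win32_NetworkAdapterConfiguration and its IPAddress string array are standard WMI, but the WHERE clause and field choice are only illustrative:

    //go:build windows

    package main

    import (
        "fmt"
        "log"

        "github.com/StackExchange/wmi"
    )

    // IPAddress is a string array in WMI; before this change loadEntity could
    // only fill []string destinations, now []uint8 works the same way.
    type Win32_NetworkAdapterConfiguration struct {
        Description string
        IPAddress   []string
    }

    func main() {
        var dst []Win32_NetworkAdapterConfiguration
        q := wmi.CreateQuery(&dst, "WHERE IPEnabled = TRUE")
        if err := wmi.Query(q, &dst); err != nil {
            log.Fatal(err)
        }
        for _, nic := range dst {
            fmt.Println(nic.Description, nic.IPAddress)
        }
    }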
27 changes: vendor/github.com/alecthomas/template/LICENSE (generated, vendored, new file)
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
406 changes: vendor/github.com/alecthomas/template/doc.go (generated, vendored, new file)
@@ -0,0 +1,406 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package template implements data-driven templates for generating textual output.
|
||||
|
||||
To generate HTML output, see package html/template, which has the same interface
|
||||
as this package but automatically secures HTML output against certain attacks.
|
||||
|
||||
Templates are executed by applying them to a data structure. Annotations in the
|
||||
template refer to elements of the data structure (typically a field of a struct
|
||||
or a key in a map) to control execution and derive values to be displayed.
|
||||
Execution of the template walks the structure and sets the cursor, represented
|
||||
by a period '.' and called "dot", to the value at the current location in the
|
||||
structure as execution proceeds.
|
||||
|
||||
The input text for a template is UTF-8-encoded text in any format.
|
||||
"Actions"--data evaluations or control structures--are delimited by
|
||||
"{{" and "}}"; all text outside actions is copied to the output unchanged.
|
||||
Actions may not span newlines, although comments can.
|
||||
|
||||
Once parsed, a template may be executed safely in parallel.
|
||||
|
||||
Here is a trivial example that prints "17 items are made of wool".
|
||||
|
||||
type Inventory struct {
|
||||
Material string
|
||||
Count uint
|
||||
}
|
||||
sweaters := Inventory{"wool", 17}
|
||||
tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
|
||||
if err != nil { panic(err) }
|
||||
err = tmpl.Execute(os.Stdout, sweaters)
|
||||
if err != nil { panic(err) }
|
||||
|
||||
More intricate examples appear below.
|
||||
|
||||
Actions
|
||||
|
||||
Here is the list of actions. "Arguments" and "pipelines" are evaluations of
|
||||
data, defined in detail below.
|
||||
|
||||
*/
|
||||
// {{/* a comment */}}
|
||||
// A comment; discarded. May contain newlines.
|
||||
// Comments do not nest and must start and end at the
|
||||
// delimiters, as shown here.
|
||||
/*
|
||||
|
||||
{{pipeline}}
|
||||
The default textual representation of the value of the pipeline
|
||||
is copied to the output.
|
||||
|
||||
{{if pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, T1 is executed. The empty values are false, 0, any
|
||||
nil pointer or interface value, and any array, slice, map, or
|
||||
string of length zero.
|
||||
Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, T0 is executed;
|
||||
otherwise, T1 is executed. Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
|
||||
To simplify the appearance of if-else chains, the else action
|
||||
of an if may include another if directly; the effect is exactly
|
||||
the same as writing
|
||||
{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
|
||||
|
||||
{{range pipeline}} T1 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, nothing is output;
|
||||
otherwise, dot is set to the successive elements of the array,
|
||||
slice, or map and T1 is executed. If the value is a map and the
|
||||
keys are of basic type with a defined order ("comparable"), the
|
||||
elements will be visited in sorted key order.
|
||||
|
||||
{{range pipeline}} T1 {{else}} T0 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, dot is unaffected and
|
||||
T0 is executed; otherwise, dot is set to the successive elements
|
||||
of the array, slice, or map and T1 is executed.
|
||||
|
||||
{{template "name"}}
|
||||
The template with the specified name is executed with nil data.
|
||||
|
||||
{{template "name" pipeline}}
|
||||
The template with the specified name is executed with dot set
|
||||
to the value of the pipeline.
|
||||
|
||||
{{with pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, dot is set to the value of the pipeline and T1 is
|
||||
executed.
|
||||
|
||||
{{with pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, dot is unaffected and T0
|
||||
is executed; otherwise, dot is set to the value of the pipeline
|
||||
and T1 is executed.
|
||||
|
||||
Arguments
|
||||
|
||||
An argument is a simple value, denoted by one of the following.
|
||||
|
||||
- A boolean, string, character, integer, floating-point, imaginary
|
||||
or complex constant in Go syntax. These behave like Go's untyped
|
||||
constants, although raw strings may not span newlines.
|
||||
- The keyword nil, representing an untyped Go nil.
|
||||
- The character '.' (period):
|
||||
.
|
||||
The result is the value of dot.
|
||||
- A variable name, which is a (possibly empty) alphanumeric string
|
||||
preceded by a dollar sign, such as
|
||||
$piOver2
|
||||
or
|
||||
$
|
||||
The result is the value of the variable.
|
||||
Variables are described below.
|
||||
- The name of a field of the data, which must be a struct, preceded
|
||||
by a period, such as
|
||||
.Field
|
||||
The result is the value of the field. Field invocations may be
|
||||
chained:
|
||||
.Field1.Field2
|
||||
Fields can also be evaluated on variables, including chaining:
|
||||
$x.Field1.Field2
|
||||
- The name of a key of the data, which must be a map, preceded
|
||||
by a period, such as
|
||||
.Key
|
||||
The result is the map element value indexed by the key.
|
||||
Key invocations may be chained and combined with fields to any
|
||||
depth:
|
||||
.Field1.Key1.Field2.Key2
|
||||
Although the key must be an alphanumeric identifier, unlike with
|
||||
field names they do not need to start with an upper case letter.
|
||||
Keys can also be evaluated on variables, including chaining:
|
||||
$x.key1.key2
|
||||
- The name of a niladic method of the data, preceded by a period,
|
||||
such as
|
||||
.Method
|
||||
The result is the value of invoking the method with dot as the
|
||||
receiver, dot.Method(). Such a method must have one return value (of
|
||||
any type) or two return values, the second of which is an error.
|
||||
If it has two and the returned error is non-nil, execution terminates
|
||||
and an error is returned to the caller as the value of Execute.
|
||||
Method invocations may be chained and combined with fields and keys
|
||||
to any depth:
|
||||
.Field1.Key1.Method1.Field2.Key2.Method2
|
||||
Methods can also be evaluated on variables, including chaining:
|
||||
$x.Method1.Field
|
||||
- The name of a niladic function, such as
|
||||
fun
|
||||
The result is the value of invoking the function, fun(). The return
|
||||
types and values behave as in methods. Functions and function
|
||||
names are described below.
|
||||
- A parenthesized instance of one the above, for grouping. The result
|
||||
may be accessed by a field or map key invocation.
|
||||
print (.F1 arg1) (.F2 arg2)
|
||||
(.StructValuedMethod "arg").Field
|
||||
|
||||
Arguments may evaluate to any type; if they are pointers the implementation
|
||||
automatically indirects to the base type when required.
|
||||
If an evaluation yields a function value, such as a function-valued
|
||||
field of a struct, the function is not invoked automatically, but it
|
||||
can be used as a truth value for an if action and the like. To invoke
|
||||
it, use the call function, defined below.
|
||||
|
||||
A pipeline is a possibly chained sequence of "commands". A command is a simple
|
||||
value (argument) or a function or method call, possibly with multiple arguments:
|
||||
|
||||
Argument
|
||||
The result is the value of evaluating the argument.
|
||||
.Method [Argument...]
|
||||
The method can be alone or the last element of a chain but,
|
||||
unlike methods in the middle of a chain, it can take arguments.
|
||||
The result is the value of calling the method with the
|
||||
arguments:
|
||||
dot.Method(Argument1, etc.)
|
||||
functionName [Argument...]
|
||||
The result is the value of calling the function associated
|
||||
with the name:
|
||||
function(Argument1, etc.)
|
||||
Functions and function names are described below.
|
||||
|
||||
Pipelines
|
||||
|
||||
A pipeline may be "chained" by separating a sequence of commands with pipeline
|
||||
characters '|'. In a chained pipeline, the result of the each command is
|
||||
passed as the last argument of the following command. The output of the final
|
||||
command in the pipeline is the value of the pipeline.
|
||||
|
||||
The output of a command will be either one value or two values, the second of
|
||||
which has type error. If that second value is present and evaluates to
|
||||
non-nil, execution terminates and the error is returned to the caller of
|
||||
Execute.
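As a concrete instance of chaining, the program below runs the elaborate pipeline quoted in the Examples section further down; each command receives the previous result as its final argument, and the output is the quoted word "output":

    package main

    import (
        "os"

        "github.com/alecthomas/template"
    )

    func main() {
        // "out" and "put" are joined by the first printf, and the result is
        // re-quoted by the second, so the program prints "output" with quotes.
        const src = `{{"put" | printf "%s%s" "out" | printf "%q"}}`
        t := template.Must(template.New("pipe").Parse(src))
        if err := t.Execute(os.Stdout, nil); err != nil {
            panic(err)
        }
    }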
|
||||
|
||||
Variables
|
||||
|
||||
A pipeline inside an action may initialize a variable to capture the result.
|
||||
The initialization has syntax
|
||||
|
||||
$variable := pipeline
|
||||
|
||||
where $variable is the name of the variable. An action that declares a
|
||||
variable produces no output.
|
||||
|
||||
If a "range" action initializes a variable, the variable is set to the
|
||||
successive elements of the iteration. Also, a "range" may declare two
|
||||
variables, separated by a comma:
|
||||
|
||||
range $index, $element := pipeline
|
||||
|
||||
in which case $index and $element are set to the successive values of the
|
||||
array/slice index or map key and element, respectively. Note that if there is
|
||||
only one variable, it is assigned the element; this is opposite to the
|
||||
convention in Go range clauses.
|
||||
|
||||
A variable's scope extends to the "end" action of the control structure ("if",
|
||||
"with", or "range") in which it is declared, or to the end of the template if
|
||||
there is no such control structure. A template invocation does not inherit
|
||||
variables from the point of its invocation.
|
||||
|
||||
When execution begins, $ is set to the data argument passed to Execute, that is,
|
||||
to the starting value of dot.
|
||||
|
||||
Examples
|
||||
|
||||
Here are some example one-line templates demonstrating pipelines and variables.
|
||||
All produce the quoted word "output":
|
||||
|
||||
{{"\"output\""}}
|
||||
A string constant.
|
||||
{{`"output"`}}
|
||||
A raw string constant.
|
||||
{{printf "%q" "output"}}
|
||||
A function call.
|
||||
{{"output" | printf "%q"}}
|
||||
A function call whose final argument comes from the previous
|
||||
command.
|
||||
{{printf "%q" (print "out" "put")}}
|
||||
A parenthesized argument.
|
||||
{{"put" | printf "%s%s" "out" | printf "%q"}}
|
||||
A more elaborate call.
|
||||
{{"output" | printf "%s" | printf "%q"}}
|
||||
A longer chain.
|
||||
{{with "output"}}{{printf "%q" .}}{{end}}
|
||||
A with action using dot.
|
||||
{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
|
||||
A with action that creates and uses a variable.
|
||||
{{with $x := "output"}}{{printf "%q" $x}}{{end}}
|
||||
A with action that uses the variable in another action.
|
||||
{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
|
||||
The same, but pipelined.
|
||||
|
||||
Functions
|
||||
|
||||
During execution functions are found in two function maps: first in the
|
||||
template, then in the global function map. By default, no functions are defined
|
||||
in the template but the Funcs method can be used to add them.
|
||||
|
||||
Predefined global functions are named as follows.
|
||||
|
||||
and
|
||||
Returns the boolean AND of its arguments by returning the
|
||||
first empty argument or the last argument, that is,
|
||||
"and x y" behaves as "if x then y else x". All the
|
||||
arguments are evaluated.
|
||||
call
|
||||
Returns the result of calling the first argument, which
|
||||
must be a function, with the remaining arguments as parameters.
|
||||
Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
|
||||
Y is a func-valued field, map entry, or the like.
|
||||
The first argument must be the result of an evaluation
|
||||
that yields a value of function type (as distinct from
|
||||
a predefined function such as print). The function must
|
||||
return either one or two result values, the second of which
|
||||
is of type error. If the arguments don't match the function
|
||||
or the returned error value is non-nil, execution stops.
|
||||
html
|
||||
Returns the escaped HTML equivalent of the textual
|
||||
representation of its arguments.
|
||||
index
|
||||
Returns the result of indexing its first argument by the
|
||||
following arguments. Thus "index x 1 2 3" is, in Go syntax,
|
||||
x[1][2][3]. Each indexed item must be a map, slice, or array.
|
||||
js
|
||||
Returns the escaped JavaScript equivalent of the textual
|
||||
representation of its arguments.
|
||||
len
|
||||
Returns the integer length of its argument.
|
||||
not
|
||||
Returns the boolean negation of its single argument.
|
||||
or
|
||||
Returns the boolean OR of its arguments by returning the
|
||||
first non-empty argument or the last argument, that is,
|
||||
"or x y" behaves as "if x then x else y". All the
|
||||
arguments are evaluated.
|
||||
print
|
||||
An alias for fmt.Sprint
|
||||
printf
|
||||
An alias for fmt.Sprintf
|
||||
println
|
||||
An alias for fmt.Sprintln
|
||||
urlquery
|
||||
Returns the escaped value of the textual representation of
|
||||
its arguments in a form suitable for embedding in a URL query.
|
||||
|
||||
The boolean functions take any zero value to be false and a non-zero
|
||||
value to be true.
|
||||
|
||||
There is also a set of binary comparison operators defined as
|
||||
functions:
|
||||
|
||||
eq
|
||||
Returns the boolean truth of arg1 == arg2
|
||||
ne
|
||||
Returns the boolean truth of arg1 != arg2
|
||||
lt
|
||||
Returns the boolean truth of arg1 < arg2
|
||||
le
|
||||
Returns the boolean truth of arg1 <= arg2
|
||||
gt
|
||||
Returns the boolean truth of arg1 > arg2
|
||||
ge
|
||||
Returns the boolean truth of arg1 >= arg2
|
||||
|
||||
For simpler multi-way equality tests, eq (only) accepts two or more
|
||||
arguments and compares the second and subsequent to the first,
|
||||
returning in effect
|
||||
|
||||
arg1==arg2 || arg1==arg3 || arg1==arg4 ...
|
||||
|
||||
(Unlike with || in Go, however, eq is a function call and all the
|
||||
arguments will be evaluated.)
|
||||
|
||||
The comparison functions work on basic types only (or named basic
|
||||
types, such as "type Celsius float32"). They implement the Go rules
|
||||
for comparison of values, except that size and exact type are
|
||||
ignored, so any integer value, signed or unsigned, may be compared
|
||||
with any other integer value. (The arithmetic value is compared,
|
||||
not the bit pattern, so all negative integers are less than all
|
||||
unsigned integers.) However, as usual, one may not compare an int
|
||||
with a float32 and so on.
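To make the function map and the comparison rules concrete, this sketch registers one custom function with Funcs and uses the built-in eq in its multi-argument form (exported API only; the data and names are illustrative):

    package main

    import (
        "os"
        "strings"

        "github.com/alecthomas/template"
    )

    func main() {
        funcs := template.FuncMap{"upper": strings.ToUpper}
        const src = `{{if eq .Env "prod" "production"}}{{upper .Name}} is live
    {{else}}{{.Name}} is not in production
    {{end}}`
        t := template.Must(template.New("f").Funcs(funcs).Parse(src))
        data := struct{ Env, Name string }{"production", "exporter"}
        if err := t.Execute(os.Stdout, data); err != nil {
            panic(err)
        }
    }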
|
||||
|
||||
Associated templates
|
||||
|
||||
Each template is named by a string specified when it is created. Also, each
|
||||
template is associated with zero or more other templates that it may invoke by
|
||||
name; such associations are transitive and form a name space of templates.
|
||||
|
||||
A template may use a template invocation to instantiate another associated
|
||||
template; see the explanation of the "template" action above. The name must be
|
||||
that of a template associated with the template that contains the invocation.
|
||||
|
||||
Nested template definitions
|
||||
|
||||
When parsing a template, another template may be defined and associated with the
|
||||
template being parsed. Template definitions must appear at the top level of the
|
||||
template, much like global variables in a Go program.
|
||||
|
||||
The syntax of such definitions is to surround each template declaration with a
|
||||
"define" and "end" action.
|
||||
|
||||
The define action names the template being created by providing a string
|
||||
constant. Here is a simple example:
|
||||
|
||||
`{{define "T1"}}ONE{{end}}
|
||||
{{define "T2"}}TWO{{end}}
|
||||
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
|
||||
{{template "T3"}}`
|
||||
|
||||
This defines two templates, T1 and T2, and a third T3 that invokes the other two
|
||||
when it is executed. Finally it invokes T3. If executed this template will
|
||||
produce the text
|
||||
|
||||
ONE TWO
|
||||
|
||||
By construction, a template may reside in only one association. If it's
|
||||
necessary to have a template addressable from multiple associations, the
|
||||
template definition must be parsed multiple times to create distinct *Template
|
||||
values, or must be copied with the Clone or AddParseTree method.
|
||||
|
||||
Parse may be called multiple times to assemble the various associated templates;
|
||||
see the ParseFiles and ParseGlob functions and methods for simple ways to parse
|
||||
related templates stored in files.
|
||||
|
||||
A template may be executed directly or through ExecuteTemplate, which executes
|
||||
an associated template identified by name. To invoke our example above, we
|
||||
might write,
|
||||
|
||||
err := tmpl.Execute(os.Stdout, "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
or to invoke a particular template explicitly by name,
|
||||
|
||||
err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
*/
|
||||
package template
|
||||
845 changes: vendor/github.com/alecthomas/template/exec.go (generated, vendored, new file)
@@ -0,0 +1,845 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// state represents the state of an execution. It's not part of the
|
||||
// template so that multiple executions of the same template
|
||||
// can execute in parallel.
|
||||
type state struct {
|
||||
tmpl *Template
|
||||
wr io.Writer
|
||||
node parse.Node // current node, for errors
|
||||
vars []variable // push-down stack of variable values.
|
||||
}
|
||||
|
||||
// variable holds the dynamic value of a variable such as $, $x etc.
|
||||
type variable struct {
|
||||
name string
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
// push pushes a new variable on the stack.
|
||||
func (s *state) push(name string, value reflect.Value) {
|
||||
s.vars = append(s.vars, variable{name, value})
|
||||
}
|
||||
|
||||
// mark returns the length of the variable stack.
|
||||
func (s *state) mark() int {
|
||||
return len(s.vars)
|
||||
}
|
||||
|
||||
// pop pops the variable stack up to the mark.
|
||||
func (s *state) pop(mark int) {
|
||||
s.vars = s.vars[0:mark]
|
||||
}
|
||||
|
||||
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
|
||||
func (s *state) setVar(n int, value reflect.Value) {
|
||||
s.vars[len(s.vars)-n].value = value
|
||||
}
|
||||
|
||||
// varValue returns the value of the named variable.
|
||||
func (s *state) varValue(name string) reflect.Value {
|
||||
for i := s.mark() - 1; i >= 0; i-- {
|
||||
if s.vars[i].name == name {
|
||||
return s.vars[i].value
|
||||
}
|
||||
}
|
||||
s.errorf("undefined variable: %s", name)
|
||||
return zero
|
||||
}
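The push/mark/pop trio above implements lexical scoping for template variables: mark records the stack depth when a scope is entered and pop truncates back to it on exit. A small standalone sketch of that discipline (illustrative, not the package's own types):

    package main

    import "fmt"

    type variable struct {
        name  string
        value interface{}
    }

    // stack follows the mark/pop discipline: variables pushed after a mark
    // disappear once the scope pops back to that mark.
    type stack struct{ vars []variable }

    func (s *stack) push(name string, v interface{}) { s.vars = append(s.vars, variable{name, v}) }
    func (s *stack) mark() int                       { return len(s.vars) }
    func (s *stack) pop(mark int)                    { s.vars = s.vars[:mark] }

    func (s *stack) lookup(name string) (interface{}, bool) {
        for i := len(s.vars) - 1; i >= 0; i-- {
            if s.vars[i].name == name {
                return s.vars[i].value, true
            }
        }
        return nil, false
    }

    func main() {
        s := &stack{}
        s.push("$", "dot")
        m := s.mark()
        s.push("$x", 42)
        fmt.Println(s.lookup("$x")) // 42 true
        s.pop(m)
        fmt.Println(s.lookup("$x")) // <nil> false
    }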
|
||||
|
||||
var zero reflect.Value
|
||||
|
||||
// at marks the state to be on node n, for error reporting.
|
||||
func (s *state) at(node parse.Node) {
|
||||
s.node = node
|
||||
}
|
||||
|
||||
// doublePercent returns the string with %'s replaced by %%, if necessary,
|
||||
// so it can be used safely inside a Printf format string.
|
||||
func doublePercent(str string) string {
|
||||
if strings.Contains(str, "%") {
|
||||
str = strings.Replace(str, "%", "%%", -1)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (s *state) errorf(format string, args ...interface{}) {
|
||||
name := doublePercent(s.tmpl.Name())
|
||||
if s.node == nil {
|
||||
format = fmt.Sprintf("template: %s: %s", name, format)
|
||||
} else {
|
||||
location, context := s.tmpl.ErrorContext(s.node)
|
||||
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
|
||||
}
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// errRecover is the handler that turns panics into returns from the top
|
||||
// level of Parse.
|
||||
func errRecover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
switch err := e.(type) {
|
||||
case runtime.Error:
|
||||
panic(e)
|
||||
case error:
|
||||
*errp = err
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}
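errorf panics with an error value and errRecover converts that panic back into the named return value of Execute, re-panicking on genuine runtime errors. The same pattern in isolation, with illustrative names that are not part of this package:

    package main

    import (
        "errors"
        "fmt"
        "runtime"
    )

    // recoverError turns a panic carrying an error back into a normal return
    // value; runtime errors and other panic values are passed through.
    func recoverError(errp *error) {
        if e := recover(); e != nil {
            if _, ok := e.(runtime.Error); ok {
                panic(e)
            }
            if err, ok := e.(error); ok {
                *errp = err
                return
            }
            panic(e)
        }
    }

    func run(fail bool) (err error) {
        defer recoverError(&err)
        if fail {
            panic(errors.New("template: something went wrong"))
        }
        return nil
    }

    func main() {
        fmt.Println(run(true))  // template: something went wrong
        fmt.Println(run(false)) // <nil>
    }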
|
||||
|
||||
// ExecuteTemplate applies the template associated with t that has the given name
|
||||
// to the specified data object and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
|
||||
tmpl := t.tmpl[name]
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
|
||||
}
|
||||
return tmpl.Execute(wr, data)
|
||||
}
|
||||
|
||||
// Execute applies a parsed template to the specified data object,
|
||||
// and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
|
||||
defer errRecover(&err)
|
||||
value := reflect.ValueOf(data)
|
||||
state := &state{
|
||||
tmpl: t,
|
||||
wr: wr,
|
||||
vars: []variable{{"$", value}},
|
||||
}
|
||||
t.init()
|
||||
if t.Tree == nil || t.Root == nil {
|
||||
var b bytes.Buffer
|
||||
for name, tmpl := range t.tmpl {
|
||||
if tmpl.Tree == nil || tmpl.Root == nil {
|
||||
continue
|
||||
}
|
||||
if b.Len() > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
fmt.Fprintf(&b, "%q", name)
|
||||
}
|
||||
var s string
|
||||
if b.Len() > 0 {
|
||||
s = "; defined templates are: " + b.String()
|
||||
}
|
||||
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
|
||||
}
|
||||
state.walk(value, t.Root)
|
||||
return
|
||||
}
|
||||
|
||||
// Walk functions step through the major pieces of the template structure,
|
||||
// generating output as they go.
|
||||
func (s *state) walk(dot reflect.Value, node parse.Node) {
|
||||
s.at(node)
|
||||
switch node := node.(type) {
|
||||
case *parse.ActionNode:
|
||||
// Do not pop variables so they persist until next end.
|
||||
// Also, if the action declares variables, don't print the result.
|
||||
val := s.evalPipeline(dot, node.Pipe)
|
||||
if len(node.Pipe.Decl) == 0 {
|
||||
s.printValue(node, val)
|
||||
}
|
||||
case *parse.IfNode:
|
||||
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
|
||||
case *parse.ListNode:
|
||||
for _, node := range node.Nodes {
|
||||
s.walk(dot, node)
|
||||
}
|
||||
case *parse.RangeNode:
|
||||
s.walkRange(dot, node)
|
||||
case *parse.TemplateNode:
|
||||
s.walkTemplate(dot, node)
|
||||
case *parse.TextNode:
|
||||
if _, err := s.wr.Write(node.Text); err != nil {
|
||||
s.errorf("%s", err)
|
||||
}
|
||||
case *parse.WithNode:
|
||||
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
|
||||
default:
|
||||
s.errorf("unknown node: %s", node)
|
||||
}
|
||||
}
|
||||
|
||||
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
|
||||
// are identical in behavior except that 'with' sets dot.
|
||||
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
|
||||
defer s.pop(s.mark())
|
||||
val := s.evalPipeline(dot, pipe)
|
||||
truth, ok := isTrue(val)
|
||||
if !ok {
|
||||
s.errorf("if/with can't use %v", val)
|
||||
}
|
||||
if truth {
|
||||
if typ == parse.NodeWith {
|
||||
s.walk(val, list)
|
||||
} else {
|
||||
s.walk(dot, list)
|
||||
}
|
||||
} else if elseList != nil {
|
||||
s.walk(dot, elseList)
|
||||
}
|
||||
}
|
||||
|
||||
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
|
||||
// and whether the value has a meaningful truth value.
|
||||
func isTrue(val reflect.Value) (truth, ok bool) {
|
||||
if !val.IsValid() {
|
||||
// Something like var x interface{}, never set. It's a form of nil.
|
||||
return false, true
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
truth = val.Len() > 0
|
||||
case reflect.Bool:
|
||||
truth = val.Bool()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
truth = val.Complex() != 0
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
|
||||
truth = !val.IsNil()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
truth = val.Int() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
truth = val.Float() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
truth = val.Uint() != 0
|
||||
case reflect.Struct:
|
||||
truth = true // Struct values are always true.
|
||||
default:
|
||||
return
|
||||
}
|
||||
return truth, true
|
||||
}
|
||||
|
||||
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
|
||||
s.at(r)
|
||||
defer s.pop(s.mark())
|
||||
val, _ := indirect(s.evalPipeline(dot, r.Pipe))
|
||||
// mark top of stack before any variables in the body are pushed.
|
||||
mark := s.mark()
|
||||
oneIteration := func(index, elem reflect.Value) {
|
||||
// Set top var (lexically the second if there are two) to the element.
|
||||
if len(r.Pipe.Decl) > 0 {
|
||||
s.setVar(1, elem)
|
||||
}
|
||||
// Set next var (lexically the first if there are two) to the index.
|
||||
if len(r.Pipe.Decl) > 1 {
|
||||
s.setVar(2, index)
|
||||
}
|
||||
s.walk(elem, r.List)
|
||||
s.pop(mark)
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
oneIteration(reflect.ValueOf(i), val.Index(i))
|
||||
}
|
||||
return
|
||||
case reflect.Map:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for _, key := range sortKeys(val.MapKeys()) {
|
||||
oneIteration(key, val.MapIndex(key))
|
||||
}
|
||||
return
|
||||
case reflect.Chan:
|
||||
if val.IsNil() {
|
||||
break
|
||||
}
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
elem, ok := val.Recv()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
oneIteration(reflect.ValueOf(i), elem)
|
||||
}
|
||||
if i == 0 {
|
||||
break
|
||||
}
|
||||
return
|
||||
case reflect.Invalid:
|
||||
break // An invalid value is likely a nil map, etc. and acts like an empty map.
|
||||
default:
|
||||
s.errorf("range can't iterate over %v", val)
|
||||
}
|
||||
if r.ElseList != nil {
|
||||
s.walk(dot, r.ElseList)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
|
||||
s.at(t)
|
||||
tmpl := s.tmpl.tmpl[t.Name]
|
||||
if tmpl == nil {
|
||||
s.errorf("template %q not defined", t.Name)
|
||||
}
|
||||
// Variables declared by the pipeline persist.
|
||||
dot = s.evalPipeline(dot, t.Pipe)
|
||||
newState := *s
|
||||
newState.tmpl = tmpl
|
||||
// No dynamic scoping: template invocations inherit no variables.
|
||||
newState.vars = []variable{{"$", dot}}
|
||||
newState.walk(dot, tmpl.Root)
|
||||
}
|
||||
|
||||
// Eval functions evaluate pipelines, commands, and their elements and extract
|
||||
// values from the data structure by examining fields, calling methods, and so on.
|
||||
// The printing of those values happens only through walk functions.
|
||||
|
||||
// evalPipeline returns the value acquired by evaluating a pipeline. If the
|
||||
// pipeline has a variable declaration, the variable will be pushed on the
|
||||
// stack. Callers should therefore pop the stack after they are finished
|
||||
// executing commands depending on the pipeline value.
|
||||
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
|
||||
if pipe == nil {
|
||||
return
|
||||
}
|
||||
s.at(pipe)
|
||||
for _, cmd := range pipe.Cmds {
|
||||
value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
|
||||
// If the object has type interface{}, dig down one level to the thing inside.
|
||||
if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
|
||||
value = reflect.ValueOf(value.Interface()) // lovely!
|
||||
}
|
||||
}
|
||||
for _, variable := range pipe.Decl {
|
||||
s.push(variable.Ident[0], value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
|
||||
if len(args) > 1 || final.IsValid() {
|
||||
s.errorf("can't give argument to non-function %s", args[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
|
||||
firstWord := cmd.Args[0]
|
||||
switch n := firstWord.(type) {
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, cmd.Args, final)
|
||||
case *parse.ChainNode:
|
||||
return s.evalChainNode(dot, n, cmd.Args, final)
|
||||
case *parse.IdentifierNode:
|
||||
// Must be a function.
|
||||
return s.evalFunction(dot, n, cmd, cmd.Args, final)
|
||||
case *parse.PipeNode:
|
||||
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
|
||||
return s.evalPipeline(dot, n)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, cmd.Args, final)
|
||||
}
|
||||
s.at(firstWord)
|
||||
s.notAFunction(cmd.Args, final)
|
||||
switch word := firstWord.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(word.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.NilNode:
|
||||
s.errorf("nil is not a command")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(word)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(word.Text)
|
||||
}
|
||||
s.errorf("can't evaluate command %q", firstWord)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// idealConstant is called to return the value of a number in a context where
|
||||
// we don't know the type. In that case, the syntax of the number tells us
|
||||
// its type, and we use Go rules to resolve. Note there is no such thing as
|
||||
// a uint ideal constant in this situation - the value must be of int type.
|
||||
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
|
||||
// These are ideal constants but we don't know the type
|
||||
// and we have no context. (If it was a method argument,
|
||||
// we'd know what we need.) The syntax guides us to some extent.
|
||||
s.at(constant)
|
||||
switch {
|
||||
case constant.IsComplex:
|
||||
return reflect.ValueOf(constant.Complex128) // incontrovertible.
|
||||
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
|
||||
return reflect.ValueOf(constant.Float64)
|
||||
case constant.IsInt:
|
||||
n := int(constant.Int64)
|
||||
if int64(n) != constant.Int64 {
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return reflect.ValueOf(n)
|
||||
case constant.IsUint:
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return zero
|
||||
}
|
||||
|
||||
func isHexConstant(s string) bool {
|
||||
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
|
||||
}
|
||||
|
||||
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(field)
|
||||
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(chain)
|
||||
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
|
||||
pipe := s.evalArg(dot, nil, chain.Node)
|
||||
if len(chain.Field) == 0 {
|
||||
s.errorf("internal error: no fields in evalChainNode")
|
||||
}
|
||||
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
|
||||
s.at(variable)
|
||||
value := s.varValue(variable.Ident[0])
|
||||
if len(variable.Ident) == 1 {
|
||||
s.notAFunction(args, final)
|
||||
return value
|
||||
}
|
||||
return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
|
||||
}
|
||||
|
||||
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
|
||||
// dot is the environment in which to evaluate arguments, while
|
||||
// receiver is the value being walked along the chain.
|
||||
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
n := len(ident)
|
||||
for i := 0; i < n-1; i++ {
|
||||
receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
|
||||
}
|
||||
// Now if it's a method, it gets the arguments.
|
||||
return s.evalField(dot, ident[n-1], node, args, final, receiver)
|
||||
}
|
||||
|
||||
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(node)
|
||||
name := node.Ident
|
||||
function, ok := findFunction(name, s.tmpl)
|
||||
if !ok {
|
||||
s.errorf("%q is not a defined function", name)
|
||||
}
|
||||
return s.evalCall(dot, function, cmd, name, args, final)
|
||||
}
|
||||
|
||||
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
|
||||
// The 'final' argument represents the return value from the preceding
|
||||
// value of the pipeline, if any.
|
||||
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
|
||||
if !receiver.IsValid() {
|
||||
return zero
|
||||
}
|
||||
typ := receiver.Type()
|
||||
receiver, _ = indirect(receiver)
|
||||
// Unless it's an interface, need to get to a value of type *T to guarantee
|
||||
// we see all methods of T and *T.
|
||||
ptr := receiver
|
||||
if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
|
||||
ptr = ptr.Addr()
|
||||
}
|
||||
if method := ptr.MethodByName(fieldName); method.IsValid() {
|
||||
return s.evalCall(dot, method, node, fieldName, args, final)
|
||||
}
|
||||
hasArgs := len(args) > 1 || final.IsValid()
|
||||
// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
|
||||
receiver, isNil := indirect(receiver)
|
||||
if isNil {
|
||||
s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
|
||||
}
|
||||
switch receiver.Kind() {
|
||||
case reflect.Struct:
|
||||
tField, ok := receiver.Type().FieldByName(fieldName)
|
||||
if ok {
|
||||
field := receiver.FieldByIndex(tField.Index)
|
||||
if tField.PkgPath != "" { // field is unexported
|
||||
s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
|
||||
}
|
||||
// If it's a function, we must call it.
|
||||
if hasArgs {
|
||||
s.errorf("%s has arguments but cannot be invoked as function", fieldName)
|
||||
}
|
||||
return field
|
||||
}
|
||||
s.errorf("%s is not a field of struct type %s", fieldName, typ)
|
||||
case reflect.Map:
|
||||
// If it's a map, attempt to use the field name as a key.
|
||||
nameVal := reflect.ValueOf(fieldName)
|
||||
if nameVal.Type().AssignableTo(receiver.Type().Key()) {
|
||||
if hasArgs {
|
||||
s.errorf("%s is not a method but has arguments", fieldName)
|
||||
}
|
||||
return receiver.MapIndex(nameVal)
|
||||
}
|
||||
}
|
||||
s.errorf("can't evaluate field %s in type %s", fieldName, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
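As a quick illustration of the lookup order evalField implements (method first, then struct field, then map key), here is a minimal, hedged sketch run through the standard text/template front end, which shares this logic; the data and names below are illustrative only and not part of this repository:

package main

import (
	"os"
	"text/template"
)

func main() {
	// .Name hits the struct-field branch; .Extra.city falls through to the map-key branch.
	data := struct {
		Name  string
		Extra map[string]string
	}{"gopher", map[string]string{"city": "Berlin"}}
	t := template.Must(template.New("t").Parse(`{{.Name}} / {{.Extra.city}}`))
	_ = t.Execute(os.Stdout, data) // gopher / Berlin
}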
|
||||
var (
|
||||
errorType = reflect.TypeOf((*error)(nil)).Elem()
|
||||
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
|
||||
)
|
||||
|
||||
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
|
||||
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
|
||||
// as the function itself.
|
||||
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
if args != nil {
|
||||
args = args[1:] // Zeroth arg is function name/node; not passed to function.
|
||||
}
|
||||
typ := fun.Type()
|
||||
numIn := len(args)
|
||||
if final.IsValid() {
|
||||
numIn++
|
||||
}
|
||||
numFixed := len(args)
|
||||
if typ.IsVariadic() {
|
||||
numFixed = typ.NumIn() - 1 // last arg is the variadic one.
|
||||
if numIn < numFixed {
|
||||
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
|
||||
}
|
||||
} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
|
||||
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
// TODO: This could still be a confusing error; maybe goodFunc should provide info.
|
||||
s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
|
||||
}
|
||||
// Build the arg list.
|
||||
argv := make([]reflect.Value, numIn)
|
||||
// Args must be evaluated. Fixed args first.
|
||||
i := 0
|
||||
for ; i < numFixed && i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, typ.In(i), args[i])
|
||||
}
|
||||
// Now the ... args.
|
||||
if typ.IsVariadic() {
|
||||
argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
|
||||
for ; i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, argType, args[i])
|
||||
}
|
||||
}
|
||||
// Add final value if necessary.
|
||||
if final.IsValid() {
|
||||
t := typ.In(typ.NumIn() - 1)
|
||||
if typ.IsVariadic() {
|
||||
t = t.Elem()
|
||||
}
|
||||
argv[i] = s.validateType(final, t)
|
||||
}
|
||||
result := fun.Call(argv)
|
||||
// If we have an error that is not nil, stop execution and return that error to the caller.
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
s.at(node)
|
||||
s.errorf("error calling %s: %s", name, result[1].Interface().(error))
|
||||
}
|
||||
return result[0]
|
||||
}
|
||||
|
||||
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
|
||||
func canBeNil(typ reflect.Type) bool {
|
||||
switch typ.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// validateType guarantees that the value is valid and assignable to the type.
|
||||
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
|
||||
if !value.IsValid() {
|
||||
if typ == nil || canBeNil(typ) {
|
||||
// An untyped nil interface{}. Accept as a proper nil value.
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("invalid value; expected %s", typ)
|
||||
}
|
||||
if typ != nil && !value.Type().AssignableTo(typ) {
|
||||
if value.Kind() == reflect.Interface && !value.IsNil() {
|
||||
value = value.Elem()
|
||||
if value.Type().AssignableTo(typ) {
|
||||
return value
|
||||
}
|
||||
// fallthrough
|
||||
}
|
||||
// Does one dereference or indirection work? We could do more, as we
|
||||
// do with method receivers, but that gets messy and method receivers
|
||||
// are much more constrained, so it makes more sense there than here.
|
||||
// Besides, one is almost always all you need.
|
||||
switch {
|
||||
case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
|
||||
value = value.Elem()
|
||||
if !value.IsValid() {
|
||||
s.errorf("dereference of nil pointer of type %s", typ)
|
||||
}
|
||||
case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
|
||||
value = value.Addr()
|
||||
default:
|
||||
s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch arg := n.(type) {
|
||||
case *parse.DotNode:
|
||||
return s.validateType(dot, typ)
|
||||
case *parse.NilNode:
|
||||
if canBeNil(typ) {
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("cannot assign nil to %s", typ)
|
||||
case *parse.FieldNode:
|
||||
return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
|
||||
case *parse.VariableNode:
|
||||
return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
|
||||
case *parse.PipeNode:
|
||||
return s.validateType(s.evalPipeline(dot, arg), typ)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, arg, arg, nil, zero)
|
||||
case *parse.ChainNode:
|
||||
return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return s.evalBool(typ, n)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return s.evalComplex(typ, n)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return s.evalFloat(typ, n)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return s.evalInteger(typ, n)
|
||||
case reflect.Interface:
|
||||
if typ.NumMethod() == 0 {
|
||||
return s.evalEmptyInterface(dot, n)
|
||||
}
|
||||
case reflect.String:
|
||||
return s.evalString(typ, n)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return s.evalUnsignedInteger(typ, n)
|
||||
}
|
||||
s.errorf("can't handle %s for arg of type %s", n, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.BoolNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetBool(n.True)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected bool; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.StringNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetString(n.Text)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected string; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetInt(n.Int64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetUint(n.Uint64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected unsigned integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetFloat(n.Float64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected float; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetComplex(n.Complex128)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected complex; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch n := n.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(n.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, nil, zero)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, n, n, nil, zero)
|
||||
case *parse.NilNode:
|
||||
// NilNode is handled in evalArg, the only place that calls here.
|
||||
s.errorf("evalEmptyInterface: nil (can't happen)")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(n)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(n.Text)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, nil, zero)
|
||||
case *parse.PipeNode:
|
||||
return s.evalPipeline(dot, n)
|
||||
}
|
||||
s.errorf("can't handle assignment of %s to empty interface argument", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
|
||||
// We indirect through pointers and empty interfaces (only) because
|
||||
// non-empty interfaces have methods we might need.
|
||||
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
|
||||
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
|
||||
if v.IsNil() {
|
||||
return v, true
|
||||
}
|
||||
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return v, false
|
||||
}
|
||||
|
||||
// printValue writes the textual representation of the value to the output of
|
||||
// the template.
|
||||
func (s *state) printValue(n parse.Node, v reflect.Value) {
|
||||
s.at(n)
|
||||
iface, ok := printableValue(v)
|
||||
if !ok {
|
||||
s.errorf("can't print %s of type %s", n, v.Type())
|
||||
}
|
||||
fmt.Fprint(s.wr, iface)
|
||||
}
|
||||
|
||||
// printableValue returns the, possibly indirected, interface value inside v that
|
||||
// is best for a call to formatted printer.
|
||||
func printableValue(v reflect.Value) (interface{}, bool) {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v, _ = indirect(v) // fmt.Fprint handles nil.
|
||||
}
|
||||
if !v.IsValid() {
|
||||
return "<no value>", true
|
||||
}
|
||||
|
||||
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
|
||||
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
|
||||
v = v.Addr()
|
||||
} else {
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return v.Interface(), true
|
||||
}
|
||||
|
||||
// Types to help sort the keys in a map for reproducible output.
|
||||
|
||||
type rvs []reflect.Value
|
||||
|
||||
func (x rvs) Len() int { return len(x) }
|
||||
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
type rvInts struct{ rvs }
|
||||
|
||||
func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
|
||||
|
||||
type rvUints struct{ rvs }
|
||||
|
||||
func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
|
||||
|
||||
type rvFloats struct{ rvs }
|
||||
|
||||
func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
|
||||
|
||||
type rvStrings struct{ rvs }
|
||||
|
||||
func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
|
||||
|
||||
// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
|
||||
func sortKeys(v []reflect.Value) []reflect.Value {
|
||||
if len(v) <= 1 {
|
||||
return v
|
||||
}
|
||||
switch v[0].Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sort.Sort(rvFloats{v})
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
sort.Sort(rvInts{v})
|
||||
case reflect.String:
|
||||
sort.Sort(rvStrings{v})
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
sort.Sort(rvUints{v})
|
||||
}
|
||||
return v
|
||||
}
|
||||
598
vendor/github.com/alecthomas/template/funcs.go
generated
vendored
Normal file
@@ -0,0 +1,598 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FuncMap is the type of the map defining the mapping from names to functions.
|
||||
// Each function must have either a single return value, or two return values of
|
||||
// which the second has type error. In that case, if the second (error)
|
||||
// return value evaluates to non-nil during execution, execution terminates and
|
||||
// Execute returns that error.
|
||||
type FuncMap map[string]interface{}
|
||||
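A minimal sketch (not part of this vendored file) of how a FuncMap obeying the one-result or (result, error) contract described above is installed; the vendored package mirrors the standard text/template API used here, and "shout" is a hypothetical helper:

package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// A single result, so goodFunc accepts it; a (T, error) pair would also be allowed.
		"shout": func(s string) string { return s + "!" },
	}
	t := template.Must(template.New("demo").Funcs(funcs).Parse(`{{shout .}}`))
	_ = t.Execute(os.Stdout, "hello") // hello!
}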
|
||||
var builtins = FuncMap{
|
||||
"and": and,
|
||||
"call": call,
|
||||
"html": HTMLEscaper,
|
||||
"index": index,
|
||||
"js": JSEscaper,
|
||||
"len": length,
|
||||
"not": not,
|
||||
"or": or,
|
||||
"print": fmt.Sprint,
|
||||
"printf": fmt.Sprintf,
|
||||
"println": fmt.Sprintln,
|
||||
"urlquery": URLQueryEscaper,
|
||||
|
||||
// Comparisons
|
||||
"eq": eq, // ==
|
||||
"ge": ge, // >=
|
||||
"gt": gt, // >
|
||||
"le": le, // <=
|
||||
"lt": lt, // <
|
||||
"ne": ne, // !=
|
||||
}
|
||||
|
||||
var builtinFuncs = createValueFuncs(builtins)
|
||||
|
||||
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
|
||||
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
|
||||
m := make(map[string]reflect.Value)
|
||||
addValueFuncs(m, funcMap)
|
||||
return m
|
||||
}
|
||||
|
||||
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
|
||||
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
v := reflect.ValueOf(fn)
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("value for " + name + " not a function")
|
||||
}
|
||||
if !goodFunc(v.Type()) {
|
||||
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
|
||||
}
|
||||
out[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
// addFuncs adds to values the functions in funcs. It does no checking of the input -
|
||||
// call addValueFuncs first.
|
||||
func addFuncs(out, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
out[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// goodFunc checks that the function or method has the right result signature.
|
||||
func goodFunc(typ reflect.Type) bool {
|
||||
// We allow functions with 1 result or 2 results where the second is an error.
|
||||
switch {
|
||||
case typ.NumOut() == 1:
|
||||
return true
|
||||
case typ.NumOut() == 2 && typ.Out(1) == errorType:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findFunction looks for a function in the template, and global map.
|
||||
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
|
||||
if tmpl != nil && tmpl.common != nil {
|
||||
if fn := tmpl.execFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
}
|
||||
if fn := builtinFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
// Indexing.
|
||||
|
||||
// index returns the result of indexing its first argument by the following
|
||||
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
|
||||
// indexed item must be a map, slice, or array.
|
||||
func index(item interface{}, indices ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(item)
|
||||
for _, i := range indices {
|
||||
index := reflect.ValueOf(i)
|
||||
var isNil bool
|
||||
if v, isNil = indirect(v); isNil {
|
||||
return nil, fmt.Errorf("index of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
var x int64
|
||||
switch index.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x = index.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
x = int64(index.Uint())
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
|
||||
}
|
||||
if x < 0 || x >= int64(v.Len()) {
|
||||
return nil, fmt.Errorf("index out of range: %d", x)
|
||||
}
|
||||
v = v.Index(int(x))
|
||||
case reflect.Map:
|
||||
if !index.IsValid() {
|
||||
index = reflect.Zero(v.Type().Key())
|
||||
}
|
||||
if !index.Type().AssignableTo(v.Type().Key()) {
|
||||
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
|
||||
}
|
||||
if x := v.MapIndex(index); x.IsValid() {
|
||||
v = x
|
||||
} else {
|
||||
v = reflect.Zero(v.Type().Elem())
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("can't index item of type %s", v.Type())
|
||||
}
|
||||
}
|
||||
return v.Interface(), nil
|
||||
}
|
||||
|
||||
// Length
|
||||
|
||||
// length returns the length of the item, with an error if it has no defined length.
|
||||
func length(item interface{}) (int, error) {
|
||||
v, isNil := indirect(reflect.ValueOf(item))
|
||||
if isNil {
|
||||
return 0, fmt.Errorf("len of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len(), nil
|
||||
}
|
||||
return 0, fmt.Errorf("len of type %s", v.Type())
|
||||
}
|
||||
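For reference, a small hedged example exercising the index and len builtins defined above through text/template, which exposes the same names; the data is illustrative:

package main

import (
	"os"
	"text/template"
)

func main() {
	data := map[string][]int{"primes": {2, 3, 5, 7}}
	t := template.Must(template.New("t").Parse(
		`first={{index .primes 0}} count={{len .primes}}`))
	_ = t.Execute(os.Stdout, data) // first=2 count=4
}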
|
||||
// Function invocation
|
||||
|
||||
// call returns the result of evaluating the first argument as a function.
|
||||
// The function must return 1 result, or 2 results, the second of which is an error.
|
||||
func call(fn interface{}, args ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(fn)
|
||||
typ := v.Type()
|
||||
if typ.Kind() != reflect.Func {
|
||||
return nil, fmt.Errorf("non-function of type %s", typ)
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
|
||||
}
|
||||
numIn := typ.NumIn()
|
||||
var dddType reflect.Type
|
||||
if typ.IsVariadic() {
|
||||
if len(args) < numIn-1 {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
|
||||
}
|
||||
dddType = typ.In(numIn - 1).Elem()
|
||||
} else {
|
||||
if len(args) != numIn {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
|
||||
}
|
||||
}
|
||||
argv := make([]reflect.Value, len(args))
|
||||
for i, arg := range args {
|
||||
value := reflect.ValueOf(arg)
|
||||
// Compute the expected type. Clumsy because of variadics.
|
||||
var argType reflect.Type
|
||||
if !typ.IsVariadic() || i < numIn-1 {
|
||||
argType = typ.In(i)
|
||||
} else {
|
||||
argType = dddType
|
||||
}
|
||||
if !value.IsValid() && canBeNil(argType) {
|
||||
value = reflect.Zero(argType)
|
||||
}
|
||||
if !value.Type().AssignableTo(argType) {
|
||||
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
|
||||
}
|
||||
argv[i] = value
|
||||
}
|
||||
result := v.Call(argv)
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
return result[0].Interface(), result[1].Interface().(error)
|
||||
}
|
||||
return result[0].Interface(), nil
|
||||
}
|
||||
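A brief sketch of the call builtin in use, again via text/template; "add" is a hypothetical function value passed in as data, not something defined by this repository:

package main

import (
	"os"
	"text/template"
)

func main() {
	data := map[string]interface{}{
		"add": func(a, b int) int { return a + b },
	}
	t := template.Must(template.New("t").Parse(`{{call .add 2 3}}`))
	_ = t.Execute(os.Stdout, data) // 5
}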
|
||||
// Boolean logic.
|
||||
|
||||
func truth(a interface{}) bool {
|
||||
t, _ := isTrue(reflect.ValueOf(a))
|
||||
return t
|
||||
}
|
||||
|
||||
// and computes the Boolean AND of its arguments, returning
|
||||
// the first false argument it encounters, or the last argument.
|
||||
func and(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if !truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if !truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// or computes the Boolean OR of its arguments, returning
|
||||
// the first true argument it encounters, or the last argument.
|
||||
func or(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// not returns the Boolean negation of its argument.
|
||||
func not(arg interface{}) (truth bool) {
|
||||
truth, _ = isTrue(reflect.ValueOf(arg))
|
||||
return !truth
|
||||
}
|
||||
|
||||
// Comparison.
|
||||
|
||||
// TODO: Perhaps allow comparison between signed and unsigned integers.
|
||||
|
||||
var (
|
||||
errBadComparisonType = errors.New("invalid type for comparison")
|
||||
errBadComparison = errors.New("incompatible types for comparison")
|
||||
errNoComparison = errors.New("missing argument for comparison")
|
||||
)
|
||||
|
||||
type kind int
|
||||
|
||||
const (
|
||||
invalidKind kind = iota
|
||||
boolKind
|
||||
complexKind
|
||||
intKind
|
||||
floatKind
|
||||
integerKind
|
||||
stringKind
|
||||
uintKind
|
||||
)
|
||||
|
||||
func basicKind(v reflect.Value) (kind, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolKind, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intKind, nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintKind, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return floatKind, nil
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return complexKind, nil
|
||||
case reflect.String:
|
||||
return stringKind, nil
|
||||
}
|
||||
return invalidKind, errBadComparisonType
|
||||
}
|
||||
|
||||
// eq evaluates the comparison a == b || a == c || ...
|
||||
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(arg2) == 0 {
|
||||
return false, errNoComparison
|
||||
}
|
||||
for _, arg := range arg2 {
|
||||
v2 := reflect.ValueOf(arg)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind:
|
||||
truth = v1.Bool() == v2.Bool()
|
||||
case complexKind:
|
||||
truth = v1.Complex() == v2.Complex()
|
||||
case floatKind:
|
||||
truth = v1.Float() == v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() == v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() == v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() == v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
if truth {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ne evaluates the comparison a != b.
|
||||
func ne(arg1, arg2 interface{}) (bool, error) {
|
||||
// != is the inverse of ==.
|
||||
equal, err := eq(arg1, arg2)
|
||||
return !equal, err
|
||||
}
|
||||
|
||||
// lt evaluates the comparison a < b.
|
||||
func lt(arg1, arg2 interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v2 := reflect.ValueOf(arg2)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind, complexKind:
|
||||
return false, errBadComparisonType
|
||||
case floatKind:
|
||||
truth = v1.Float() < v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() < v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() < v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() < v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
return truth, nil
|
||||
}
|
||||
|
||||
// le evaluates the comparison a <= b.
|
||||
func le(arg1, arg2 interface{}) (bool, error) {
|
||||
// <= is < or ==.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if lessThan || err != nil {
|
||||
return lessThan, err
|
||||
}
|
||||
return eq(arg1, arg2)
|
||||
}
|
||||
|
||||
// gt evaluates the comparison a > b.
|
||||
func gt(arg1, arg2 interface{}) (bool, error) {
|
||||
// > is the inverse of <=.
|
||||
lessOrEqual, err := le(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessOrEqual, nil
|
||||
}
|
||||
|
||||
// ge evaluates the comparison a >= b.
|
||||
func ge(arg1, arg2 interface{}) (bool, error) {
|
||||
// >= is the inverse of <.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessThan, nil
|
||||
}
|
||||
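To show the mixed signed/unsigned special case handled by eq and lt above, a hedged example through text/template, which applies the same comparison rules:

package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("cmp").Parse(
		`{{if eq .A .B}}equal{{else}}different{{end}}`))
	// int 3 and uint 3 compare equal via the cross-sign branch.
	_ = t.Execute(os.Stdout, map[string]interface{}{"A": 3, "B": uint(3)}) // equal
}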
|
||||
// HTML escaping.
|
||||
|
||||
var (
	htmlQuot = []byte("&#34;") // shorter than "&quot;"
	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
	htmlAmp  = []byte("&amp;")
	htmlLt   = []byte("&lt;")
	htmlGt   = []byte("&gt;")
)
|
||||
|
||||
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
|
||||
func HTMLEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i, c := range b {
|
||||
var html []byte
|
||||
switch c {
|
||||
case '"':
|
||||
html = htmlQuot
|
||||
case '\'':
|
||||
html = htmlApos
|
||||
case '&':
|
||||
html = htmlAmp
|
||||
case '<':
|
||||
html = htmlLt
|
||||
case '>':
|
||||
html = htmlGt
|
||||
default:
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
w.Write(html)
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
|
||||
func HTMLEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexAny(s, `'"&<>`) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
HTMLEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// HTMLEscaper returns the escaped HTML equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func HTMLEscaper(args ...interface{}) string {
|
||||
return HTMLEscapeString(evalArgs(args))
|
||||
}
|
||||
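A quick check of the escaping behaviour implemented above, using the identical helper exported by the standard text/template package:

package main

import (
	"fmt"
	"text/template"
)

func main() {
	fmt.Println(template.HTMLEscapeString(`<a href="x">&'`))
	// &lt;a href=&#34;x&#34;&gt;&amp;&#39;
}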
|
||||
// JavaScript escaping.
|
||||
|
||||
var (
|
||||
jsLowUni = []byte(`\u00`)
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
|
||||
jsBackslash = []byte(`\\`)
|
||||
jsApos = []byte(`\'`)
|
||||
jsQuot = []byte(`\"`)
|
||||
jsLt = []byte(`\x3C`)
|
||||
jsGt = []byte(`\x3E`)
|
||||
)
|
||||
|
||||
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
|
||||
func JSEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
if !jsIsSpecial(rune(c)) {
|
||||
// fast path: nothing to do
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
// Quotes, slashes and angle brackets get quoted.
|
||||
// Control characters get written as \u00XX.
|
||||
switch c {
|
||||
case '\\':
|
||||
w.Write(jsBackslash)
|
||||
case '\'':
|
||||
w.Write(jsApos)
|
||||
case '"':
|
||||
w.Write(jsQuot)
|
||||
case '<':
|
||||
w.Write(jsLt)
|
||||
case '>':
|
||||
w.Write(jsGt)
|
||||
default:
|
||||
w.Write(jsLowUni)
|
||||
t, b := c>>4, c&0x0f
|
||||
w.Write(hex[t : t+1])
|
||||
w.Write(hex[b : b+1])
|
||||
}
|
||||
} else {
|
||||
// Unicode rune.
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.IsPrint(r) {
|
||||
w.Write(b[i : i+size])
|
||||
} else {
|
||||
fmt.Fprintf(w, "\\u%04X", r)
|
||||
}
|
||||
i += size - 1
|
||||
}
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
|
||||
func JSEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexFunc(s, jsIsSpecial) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
JSEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func jsIsSpecial(r rune) bool {
|
||||
switch r {
|
||||
case '\\', '\'', '"', '<', '>':
|
||||
return true
|
||||
}
|
||||
return r < ' ' || utf8.RuneSelf <= r
|
||||
}
|
||||
|
||||
// JSEscaper returns the escaped JavaScript equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func JSEscaper(args ...interface{}) string {
|
||||
return JSEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// URLQueryEscaper returns the escaped value of the textual representation of
|
||||
// its arguments in a form suitable for embedding in a URL query.
|
||||
func URLQueryEscaper(args ...interface{}) string {
|
||||
return url.QueryEscape(evalArgs(args))
|
||||
}
|
||||
|
||||
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
|
||||
// fmt.Sprint(args...)
|
||||
// except that each argument is indirected (if a pointer), as required,
|
||||
// using the same rules as the default string evaluation during template
|
||||
// execution.
|
||||
func evalArgs(args []interface{}) string {
|
||||
ok := false
|
||||
var s string
|
||||
// Fast path for simple common case.
|
||||
if len(args) == 1 {
|
||||
s, ok = args[0].(string)
|
||||
}
|
||||
if !ok {
|
||||
for i, arg := range args {
|
||||
a, ok := printableValue(reflect.ValueOf(arg))
|
||||
if ok {
|
||||
args[i] = a
|
||||
} // else let fmt do its thing
|
||||
}
|
||||
s = fmt.Sprint(args...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
108
vendor/github.com/alecthomas/template/helper.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Helper functions to make constructing templates easier.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Functions and methods to parse templates.
|
||||
|
||||
// Must is a helper that wraps a call to a function returning (*Template, error)
|
||||
// and panics if the error is non-nil. It is intended for use in variable
|
||||
// initializations such as
|
||||
// var t = template.Must(template.New("name").Parse("text"))
|
||||
func Must(t *Template, err error) *Template {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseFiles creates a new Template and parses the template definitions from
|
||||
// the named files. The returned template's name will have the (base) name and
|
||||
// (parsed) contents of the first file. There must be at least one file.
|
||||
// If an error occurs, parsing stops and the returned *Template is nil.
|
||||
func ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(nil, filenames...)
|
||||
}
|
||||
|
||||
// ParseFiles parses the named files and associates the resulting templates with
|
||||
// t. If an error occurs, parsing stops and the returned template is nil;
|
||||
// otherwise it is t. There must be at least one file.
|
||||
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
|
||||
// parseFiles is the helper for the method and function. If the argument
|
||||
// template is nil, it is created from the first file.
|
||||
func parseFiles(t *Template, filenames ...string) (*Template, error) {
|
||||
if len(filenames) == 0 {
|
||||
// Not really a problem, but be consistent.
|
||||
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
|
||||
}
|
||||
for _, filename := range filenames {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := string(b)
|
||||
name := filepath.Base(filename)
|
||||
// First template becomes return value if not already defined,
|
||||
// and we use that one for subsequent New calls to associate
|
||||
// all the templates together. Also, if this file has the same name
|
||||
// as t, this file becomes the contents of t, so
|
||||
// t, err := New(name).Funcs(xxx).ParseFiles(name)
|
||||
// works. Otherwise we create a new template associated with t.
|
||||
var tmpl *Template
|
||||
if t == nil {
|
||||
t = New(name)
|
||||
}
|
||||
if name == t.Name() {
|
||||
tmpl = t
|
||||
} else {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
_, err = tmpl.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// ParseGlob creates a new Template and parses the template definitions from the
|
||||
// files identified by the pattern, which must match at least one file. The
|
||||
// returned template will have the (base) name and (parsed) contents of the
|
||||
// first file matched by the pattern. ParseGlob is equivalent to calling
|
||||
// ParseFiles with the list of files matched by the pattern.
|
||||
func ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(nil, pattern)
|
||||
}
|
||||
|
||||
// ParseGlob parses the template definitions in the files identified by the
|
||||
// pattern and associates the resulting templates with t. The pattern is
|
||||
// processed by filepath.Glob and must match at least one file. ParseGlob is
|
||||
// equivalent to calling t.ParseFiles with the list of files matched by the
|
||||
// pattern.
|
||||
func (t *Template) ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(t, pattern)
|
||||
}
|
||||
|
||||
// parseGlob is the implementation of the function and method ParseGlob.
|
||||
func parseGlob(t *Template, pattern string) (*Template, error) {
|
||||
filenames, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
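The helpers above are typically combined as below; this is a hedged sketch, and "templates/*.tmpl" and "index.tmpl" are placeholder names, not paths used by this repository:

package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.ParseGlob("templates/*.tmpl"))
	_ = t.ExecuteTemplate(os.Stdout, "index.tmpl", nil)
}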
556
vendor/github.com/alecthomas/template/parse/lex.go
generated
vendored
Normal file
@@ -0,0 +1,556 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// item represents a token or text string returned from the scanner.
|
||||
type item struct {
|
||||
typ itemType // The type of this item.
|
||||
pos Pos // The starting position, in bytes, of this item in the input string.
|
||||
val string // The value of this item.
|
||||
}
|
||||
|
||||
func (i item) String() string {
|
||||
switch {
|
||||
case i.typ == itemEOF:
|
||||
return "EOF"
|
||||
case i.typ == itemError:
|
||||
return i.val
|
||||
case i.typ > itemKeyword:
|
||||
return fmt.Sprintf("<%s>", i.val)
|
||||
case len(i.val) > 10:
|
||||
return fmt.Sprintf("%.10q...", i.val)
|
||||
}
|
||||
return fmt.Sprintf("%q", i.val)
|
||||
}
|
||||
|
||||
// itemType identifies the type of lex items.
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota // error occurred; value is text of error
|
||||
itemBool // boolean constant
|
||||
itemChar // printable ASCII character; grab bag for comma etc.
|
||||
itemCharConstant // character constant
|
||||
itemComplex // complex constant (1+2i); imaginary is just a number
|
||||
itemColonEquals // colon-equals (':=') introducing a declaration
|
||||
itemEOF
|
||||
itemField // alphanumeric identifier starting with '.'
|
||||
itemIdentifier // alphanumeric identifier not starting with '.'
|
||||
itemLeftDelim // left action delimiter
|
||||
itemLeftParen // '(' inside action
|
||||
itemNumber // simple number, including imaginary
|
||||
itemPipe // pipe symbol
|
||||
itemRawString // raw quoted string (includes quotes)
|
||||
itemRightDelim // right action delimiter
|
||||
itemElideNewline // elide newline after right delim
|
||||
itemRightParen // ')' inside action
|
||||
itemSpace // run of spaces separating arguments
|
||||
itemString // quoted string (includes quotes)
|
||||
itemText // plain text
|
||||
itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
|
||||
// Keywords appear after all the rest.
|
||||
itemKeyword // used only to delimit the keywords
|
||||
itemDot // the cursor, spelled '.'
|
||||
itemDefine // define keyword
|
||||
itemElse // else keyword
|
||||
itemEnd // end keyword
|
||||
itemIf // if keyword
|
||||
itemNil // the untyped nil constant, easiest to treat as a keyword
|
||||
itemRange // range keyword
|
||||
itemTemplate // template keyword
|
||||
itemWith // with keyword
|
||||
)
|
||||
|
||||
var key = map[string]itemType{
|
||||
".": itemDot,
|
||||
"define": itemDefine,
|
||||
"else": itemElse,
|
||||
"end": itemEnd,
|
||||
"if": itemIf,
|
||||
"range": itemRange,
|
||||
"nil": itemNil,
|
||||
"template": itemTemplate,
|
||||
"with": itemWith,
|
||||
}
|
||||
|
||||
const eof = -1
|
||||
|
||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
leftDelim string // start of action
|
||||
rightDelim string // end of action
|
||||
state stateFn // the next lexing function to enter
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
lastPos Pos // position of most recent item returned by nextItem
|
||||
items chan item // channel of scanned items
|
||||
parenDepth int // nesting depth of ( ) exprs
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType) {
|
||||
l.items <- item{t, l.start, l.input[l.start:l.pos]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (l *lexer) ignore() {
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.IndexRune(valid, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
// lineNumber reports which line we're on, based on the position of
|
||||
// the previous item returned by nextItem. Doing it this way
|
||||
// means we don't have to worry about peek double counting.
|
||||
func (l *lexer) lineNumber() int {
|
||||
return 1 + strings.Count(l.input[:l.lastPos], "\n")
|
||||
}
|
||||
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
||||
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextItem returns the next item from the input.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = item.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(name, input, left, right string) *lexer {
|
||||
if left == "" {
|
||||
left = leftDelim
|
||||
}
|
||||
if right == "" {
|
||||
right = rightDelim
|
||||
}
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
leftDelim: left,
|
||||
rightDelim: right,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexText; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
}
|
||||
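The run loop above is the classic functional state machine: each stateFn returns the next state, and nil stops the lexer. A self-contained sketch of the same pattern, with all names invented for illustration:

package main

import "fmt"

type lexer struct {
	input string
	pos   int
	words []string
}

// Each state does a unit of work and hands back the next state, or nil to stop.
type stateFn func(*lexer) stateFn

func lexWord(l *lexer) stateFn {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] != ' ' {
		l.pos++
	}
	l.words = append(l.words, l.input[start:l.pos])
	if l.pos == len(l.input) {
		return nil // end of input stops the machine, like emitting itemEOF above
	}
	l.pos++ // consume the separating space
	return lexWord
}

func main() {
	l := &lexer{input: "state machines as functions"}
	for state := stateFn(lexWord); state != nil; {
		state = state(l)
	}
	fmt.Println(l.words) // [state machines as functions]
}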
|
||||
// state functions
|
||||
|
||||
const (
|
||||
leftDelim = "{{"
|
||||
rightDelim = "}}"
|
||||
leftComment = "/*"
|
||||
rightComment = "*/"
|
||||
)
|
||||
|
||||
// lexText scans until an opening action delimiter, "{{".
|
||||
func lexText(l *lexer) stateFn {
|
||||
for {
|
||||
if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
return lexLeftDelim
|
||||
}
|
||||
if l.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexLeftDelim scans the left delimiter, which is known to be present.
|
||||
func lexLeftDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.leftDelim))
|
||||
if strings.HasPrefix(l.input[l.pos:], leftComment) {
|
||||
return lexComment
|
||||
}
|
||||
l.emit(itemLeftDelim)
|
||||
l.parenDepth = 0
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexComment scans a comment. The left comment marker is known to be present.
|
||||
func lexComment(l *lexer) stateFn {
|
||||
l.pos += Pos(len(leftComment))
|
||||
i := strings.Index(l.input[l.pos:], rightComment)
|
||||
if i < 0 {
|
||||
return l.errorf("unclosed comment")
|
||||
}
|
||||
l.pos += Pos(i + len(rightComment))
|
||||
if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
return l.errorf("comment ends before closing delimiter")
|
||||
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.ignore()
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexRightDelim scans the right delimiter, which is known to be present.
|
||||
func lexRightDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.emit(itemRightDelim)
|
||||
if l.peek() == '\\' {
|
||||
l.pos++
|
||||
l.emit(itemElideNewline)
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexInsideAction scans the elements inside action delimiters.
|
||||
func lexInsideAction(l *lexer) stateFn {
|
||||
// Either number, quoted string, or identifier.
|
||||
// Spaces separate arguments; runs of spaces turn into itemSpace.
|
||||
// Pipe symbols separate and are emitted.
|
||||
if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
if l.parenDepth == 0 {
|
||||
return lexRightDelim
|
||||
}
|
||||
return l.errorf("unclosed left paren")
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return l.errorf("unclosed action")
|
||||
case isSpace(r):
|
||||
return lexSpace
|
||||
case r == ':':
|
||||
if l.next() != '=' {
|
||||
return l.errorf("expected :=")
|
||||
}
|
||||
l.emit(itemColonEquals)
|
||||
case r == '|':
|
||||
l.emit(itemPipe)
|
||||
case r == '"':
|
||||
return lexQuote
|
||||
case r == '`':
|
||||
return lexRawQuote
|
||||
case r == '$':
|
||||
return lexVariable
|
||||
case r == '\'':
|
||||
return lexChar
|
||||
case r == '.':
|
||||
// special look-ahead for ".field" so we don't break l.backup().
|
||||
if l.pos < Pos(len(l.input)) {
|
||||
r := l.input[l.pos]
|
||||
if r < '0' || '9' < r {
|
||||
return lexField
|
||||
}
|
||||
}
|
||||
fallthrough // '.' can start a number.
|
||||
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case isAlphaNumeric(r):
|
||||
l.backup()
|
||||
return lexIdentifier
|
||||
case r == '(':
|
||||
l.emit(itemLeftParen)
|
||||
l.parenDepth++
|
||||
return lexInsideAction
|
||||
case r == ')':
|
||||
l.emit(itemRightParen)
|
||||
l.parenDepth--
|
||||
if l.parenDepth < 0 {
|
||||
return l.errorf("unexpected right paren %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
case r <= unicode.MaxASCII && unicode.IsPrint(r):
|
||||
l.emit(itemChar)
|
||||
return lexInsideAction
|
||||
default:
|
||||
return l.errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexSpace scans a run of space characters.
|
||||
// One space has already been seen.
|
||||
func lexSpace(l *lexer) stateFn {
|
||||
for isSpace(l.peek()) {
|
||||
l.next()
|
||||
}
|
||||
l.emit(itemSpace)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexIdentifier scans an alphanumeric.
|
||||
func lexIdentifier(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.next(); {
|
||||
case isAlphaNumeric(r):
|
||||
// absorb.
|
||||
default:
|
||||
l.backup()
|
||||
word := l.input[l.start:l.pos]
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
switch {
|
||||
case key[word] > itemKeyword:
|
||||
l.emit(key[word])
|
||||
case word[0] == '.':
|
||||
l.emit(itemField)
|
||||
case word == "true", word == "false":
|
||||
l.emit(itemBool)
|
||||
default:
|
||||
l.emit(itemIdentifier)
|
||||
}
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexField scans a field: .Alphanumeric.
|
||||
// The . has been scanned.
|
||||
func lexField(l *lexer) stateFn {
|
||||
return lexFieldOrVariable(l, itemField)
|
||||
}
|
||||
|
||||
// lexVariable scans a Variable: $Alphanumeric.
|
||||
// The $ has been scanned.
|
||||
func lexVariable(l *lexer) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "$".
|
||||
l.emit(itemVariable)
|
||||
return lexInsideAction
|
||||
}
|
||||
return lexFieldOrVariable(l, itemVariable)
|
||||
}
|
||||
|
||||
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
|
||||
// The . or $ has been scanned.
|
||||
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "." or "$".
|
||||
if typ == itemVariable {
|
||||
l.emit(itemVariable)
|
||||
} else {
|
||||
l.emit(itemDot)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
var r rune
|
||||
for {
|
||||
r = l.next()
|
||||
if !isAlphaNumeric(r) {
|
||||
l.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
l.emit(typ)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// atTerminator reports whether the input is at valid termination character to
|
||||
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
|
||||
// like "$x+2" not being acceptable without a space, in case we decide one
|
||||
// day to implement arithmetic.
|
||||
func (l *lexer) atTerminator() bool {
|
||||
r := l.peek()
|
||||
if isSpace(r) || isEndOfLine(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '|', ':', ')', '(':
|
||||
return true
|
||||
}
|
||||
// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
|
||||
// succeed but should fail) but only in extremely rare cases caused by willfully
|
||||
// bad choice of delimiter.
|
||||
if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lexChar scans a character constant. The initial quote is already
|
||||
// scanned. Syntax checking is done by the parser.
|
||||
func lexChar(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated character constant")
|
||||
case '\'':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemCharConstant)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
|
||||
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
|
||||
// and "089" - but when it's wrong the input is invalid and the parser (via
|
||||
// strconv) will notice.
|
||||
func lexNumber(l *lexer) stateFn {
|
||||
if !l.scanNumber() {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
if sign := l.peek(); sign == '+' || sign == '-' {
|
||||
// Complex: 1+2i. No spaces, must end in 'i'.
|
||||
if !l.scanNumber() || l.input[l.pos-1] != 'i' {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
l.emit(itemComplex)
|
||||
} else {
|
||||
l.emit(itemNumber)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
func (l *lexer) scanNumber() bool {
|
||||
// Optional leading sign.
|
||||
l.accept("+-")
|
||||
// Is it hex?
|
||||
digits := "0123456789"
|
||||
if l.accept("0") && l.accept("xX") {
|
||||
digits = "0123456789abcdefABCDEF"
|
||||
}
|
||||
l.acceptRun(digits)
|
||||
if l.accept(".") {
|
||||
l.acceptRun(digits)
|
||||
}
|
||||
if l.accept("eE") {
|
||||
l.accept("+-")
|
||||
l.acceptRun("0123456789")
|
||||
}
|
||||
// Is it imaginary?
|
||||
l.accept("i")
|
||||
// Next thing mustn't be alphanumeric.
|
||||
if isAlphaNumeric(l.peek()) {
|
||||
l.next()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// lexQuote scans a quoted string.
|
||||
func lexQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexRawQuote scans a raw quoted string.
|
||||
func lexRawQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated raw quoted string")
|
||||
case '`':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemRawString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
// isEndOfLine reports whether r is an end-of-line character.
|
||||
func isEndOfLine(r rune) bool {
|
||||
return r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
||||
834
vendor/github.com/alecthomas/template/parse/node.go
generated
vendored
Normal file
@@ -0,0 +1,834 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Parse nodes.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var textFormat = "%s" // Changed to "%q" in tests for better error messages.
|
||||
|
||||
// A Node is an element in the parse tree. The interface is trivial.
|
||||
// The interface contains an unexported method so that only
|
||||
// types local to this package can satisfy it.
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
String() string
|
||||
// Copy does a deep copy of the Node and all its components.
|
||||
// To avoid type assertions, some XxxNodes also have specialized
|
||||
// CopyXxx methods that return *XxxNode.
|
||||
Copy() Node
|
||||
Position() Pos // byte position of start of node in full original input string
|
||||
// tree returns the containing *Tree.
|
||||
// It is unexported so all implementations of Node are in this package.
|
||||
tree() *Tree
|
||||
}
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Pos represents a byte position in the original input text from which
|
||||
// this template was parsed.
|
||||
type Pos int
|
||||
|
||||
func (p Pos) Position() Pos {
|
||||
return p
|
||||
}
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
// for embedding in a Node. Embedded in all non-trivial Nodes.
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota // Plain text.
|
||||
NodeAction // A non-control action such as a field evaluation.
|
||||
NodeBool // A boolean constant.
|
||||
NodeChain // A sequence of field accesses.
|
||||
NodeCommand // An element of a pipeline.
|
||||
NodeDot // The cursor, dot.
|
||||
nodeElse // An else action. Not added to tree.
|
||||
nodeEnd // An end action. Not added to tree.
|
||||
NodeField // A field or method name.
|
||||
NodeIdentifier // An identifier; always a function name.
|
||||
NodeIf // An if action.
|
||||
NodeList // A list of Nodes.
|
||||
NodeNil // An untyped nil constant.
|
||||
NodeNumber // A numerical constant.
|
||||
NodePipe // A pipeline of commands.
|
||||
NodeRange // A range action.
|
||||
NodeString // A string constant.
|
||||
NodeTemplate // A template invocation action.
|
||||
NodeVariable // A $ variable.
|
||||
NodeWith // A with action.
|
||||
)
|
||||
|
||||
// Nodes.
|
||||
|
||||
// ListNode holds a sequence of nodes.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Nodes []Node // The element nodes in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newList(pos Pos) *ListNode {
|
||||
return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
|
||||
}
|
||||
|
||||
func (l *ListNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
func (l *ListNode) tree() *Tree {
|
||||
return l.tr
|
||||
}
|
||||
|
||||
func (l *ListNode) String() string {
|
||||
b := new(bytes.Buffer)
|
||||
for _, n := range l.Nodes {
|
||||
fmt.Fprint(b, n)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (l *ListNode) CopyList() *ListNode {
|
||||
if l == nil {
|
||||
return l
|
||||
}
|
||||
n := l.tr.newList(l.Pos)
|
||||
for _, elem := range l.Nodes {
|
||||
n.append(elem.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (l *ListNode) Copy() Node {
|
||||
return l.CopyList()
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Text []byte // The text; may span newlines.
|
||||
}
|
||||
|
||||
func (t *Tree) newText(pos Pos, text string) *TextNode {
|
||||
return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
|
||||
}
|
||||
|
||||
func (t *TextNode) String() string {
|
||||
return fmt.Sprintf(textFormat, t.Text)
|
||||
}
|
||||
|
||||
func (t *TextNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TextNode) Copy() Node {
|
||||
return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
|
||||
}
|
||||
|
||||
// PipeNode holds a pipeline with optional declaration
|
||||
type PipeNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Decl []*VariableNode // Variable declarations in lexical order.
|
||||
Cmds []*CommandNode // The commands in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
|
||||
return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
|
||||
}
|
||||
|
||||
func (p *PipeNode) append(command *CommandNode) {
|
||||
p.Cmds = append(p.Cmds, command)
|
||||
}
|
||||
|
||||
func (p *PipeNode) String() string {
|
||||
s := ""
|
||||
if len(p.Decl) > 0 {
|
||||
for i, v := range p.Decl {
|
||||
if i > 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += v.String()
|
||||
}
|
||||
s += " := "
|
||||
}
|
||||
for i, c := range p.Cmds {
|
||||
if i > 0 {
|
||||
s += " | "
|
||||
}
|
||||
s += c.String()
|
||||
}
|
||||
return s
|
||||
}
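// Illustrative sketch (editorial, not from the upstream package): String
// renders the pipeline back into template syntax, so a pipeline parsed from
// {{$x := .A | printf "%q"}} stringifies as:
//
//	$x := .A | printf "%q"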
|
||||
|
||||
func (p *PipeNode) tree() *Tree {
|
||||
return p.tr
|
||||
}
|
||||
|
||||
func (p *PipeNode) CopyPipe() *PipeNode {
|
||||
if p == nil {
|
||||
return p
|
||||
}
|
||||
var decl []*VariableNode
|
||||
for _, d := range p.Decl {
|
||||
decl = append(decl, d.Copy().(*VariableNode))
|
||||
}
|
||||
n := p.tr.newPipeline(p.Pos, p.Line, decl)
|
||||
for _, c := range p.Cmds {
|
||||
n.append(c.Copy().(*CommandNode))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *PipeNode) Copy() Node {
|
||||
return p.CopyPipe()
|
||||
}
|
||||
|
||||
// ActionNode holds an action (something bounded by delimiters).
|
||||
// Control actions have their own nodes; ActionNode represents simple
|
||||
// ones such as field evaluations and parenthesized pipelines.
|
||||
type ActionNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline in the action.
|
||||
}
|
||||
|
||||
func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
|
||||
return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (a *ActionNode) String() string {
|
||||
return fmt.Sprintf("{{%s}}", a.Pipe)
|
||||
|
||||
}
|
||||
|
||||
func (a *ActionNode) tree() *Tree {
|
||||
return a.tr
|
||||
}
|
||||
|
||||
func (a *ActionNode) Copy() Node {
|
||||
return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
|
||||
|
||||
}
|
||||
|
||||
// CommandNode holds a command (a pipeline inside an evaluating action).
|
||||
type CommandNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Args []Node // Arguments in lexical order: Identifier, field, or constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newCommand(pos Pos) *CommandNode {
|
||||
return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
|
||||
}
|
||||
|
||||
func (c *CommandNode) append(arg Node) {
|
||||
c.Args = append(c.Args, arg)
|
||||
}
|
||||
|
||||
func (c *CommandNode) String() string {
|
||||
s := ""
|
||||
for i, arg := range c.Args {
|
||||
if i > 0 {
|
||||
s += " "
|
||||
}
|
||||
if arg, ok := arg.(*PipeNode); ok {
|
||||
s += "(" + arg.String() + ")"
|
||||
continue
|
||||
}
|
||||
s += arg.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *CommandNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *CommandNode) Copy() Node {
|
||||
if c == nil {
|
||||
return c
|
||||
}
|
||||
n := c.tr.newCommand(c.Pos)
|
||||
for _, c := range c.Args {
|
||||
n.append(c.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier.
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident string // The identifier's name.
|
||||
}
|
||||
|
||||
// NewIdentifier returns a new IdentifierNode with the given identifier name.
|
||||
func NewIdentifier(ident string) *IdentifierNode {
|
||||
return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
|
||||
}
|
||||
|
||||
// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
|
||||
i.Pos = pos
|
||||
return i
|
||||
}
|
||||
|
||||
// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
|
||||
i.tr = t
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) String() string {
|
||||
return i.Ident
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) tree() *Tree {
|
||||
return i.tr
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) Copy() Node {
|
||||
return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
|
||||
}
|
||||
|
||||
// VariableNode holds a list of variable names, possibly with chained field
|
||||
// accesses. The dollar sign is part of the (first) name.
|
||||
type VariableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // Variable name and fields in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
|
||||
return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
|
||||
}
|
||||
|
||||
func (v *VariableNode) String() string {
|
||||
s := ""
|
||||
for i, id := range v.Ident {
|
||||
if i > 0 {
|
||||
s += "."
|
||||
}
|
||||
s += id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (v *VariableNode) tree() *Tree {
|
||||
return v.tr
|
||||
}
|
||||
|
||||
func (v *VariableNode) Copy() Node {
|
||||
return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
|
||||
}
|
||||
|
||||
// DotNode holds the special identifier '.'.
|
||||
type DotNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newDot(pos Pos) *DotNode {
|
||||
return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
|
||||
}
|
||||
|
||||
func (d *DotNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeDot
|
||||
}
|
||||
|
||||
func (d *DotNode) String() string {
|
||||
return "."
|
||||
}
|
||||
|
||||
func (d *DotNode) tree() *Tree {
|
||||
return d.tr
|
||||
}
|
||||
|
||||
func (d *DotNode) Copy() Node {
|
||||
return d.tr.newDot(d.Pos)
|
||||
}
|
||||
|
||||
// NilNode holds the special identifier 'nil' representing an untyped nil constant.
|
||||
type NilNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newNil(pos Pos) *NilNode {
|
||||
return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
|
||||
}
|
||||
|
||||
func (n *NilNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeNil
|
||||
}
|
||||
|
||||
func (n *NilNode) String() string {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
func (n *NilNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NilNode) Copy() Node {
|
||||
return n.tr.newNil(n.Pos)
|
||||
}
|
||||
|
||||
// FieldNode holds a field (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The period is dropped from each ident.
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
|
||||
return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
s := ""
|
||||
for _, id := range f.Ident {
|
||||
s += "." + id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (f *FieldNode) tree() *Tree {
|
||||
return f.tr
|
||||
}
|
||||
|
||||
func (f *FieldNode) Copy() Node {
|
||||
return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
|
||||
}
|
||||
|
||||
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The periods are dropped from each ident.
|
||||
type ChainNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Node Node
|
||||
Field []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
|
||||
return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
|
||||
}
|
||||
|
||||
// Add adds the named field (which should start with a period) to the end of the chain.
|
||||
func (c *ChainNode) Add(field string) {
|
||||
if len(field) == 0 || field[0] != '.' {
|
||||
panic("no dot in field")
|
||||
}
|
||||
field = field[1:] // Remove leading dot.
|
||||
if field == "" {
|
||||
panic("empty field")
|
||||
}
|
||||
c.Field = append(c.Field, field)
|
||||
}
|
||||
|
||||
func (c *ChainNode) String() string {
|
||||
s := c.Node.String()
|
||||
if _, ok := c.Node.(*PipeNode); ok {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
for _, field := range c.Field {
|
||||
s += "." + field
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *ChainNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *ChainNode) Copy() Node {
|
||||
return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
|
||||
}
|
||||
|
||||
// BoolNode holds a boolean constant.
|
||||
type BoolNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
True bool // The value of the boolean constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
|
||||
return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
|
||||
}
|
||||
|
||||
func (b *BoolNode) String() string {
|
||||
if b.True {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (b *BoolNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BoolNode) Copy() Node {
|
||||
return b.tr.newBool(b.Pos, b.True)
|
||||
}
|
||||
|
||||
// NumberNode holds a number: signed or unsigned integer, float, or complex.
|
||||
// The value is parsed and stored under all the types that can represent the value.
|
||||
// This simulates in a small amount of code the behavior of Go's ideal constants.
|
||||
type NumberNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
IsInt bool // Number has an integral value.
|
||||
IsUint bool // Number has an unsigned integral value.
|
||||
IsFloat bool // Number has a floating-point value.
|
||||
IsComplex bool // Number is complex.
|
||||
Int64 int64 // The signed integer value.
|
||||
Uint64 uint64 // The unsigned integer value.
|
||||
Float64 float64 // The floating-point value.
|
||||
Complex128 complex128 // The complex value.
|
||||
Text string // The original textual representation from the input.
|
||||
}
|
||||
|
||||
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
|
||||
n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
|
||||
switch typ {
|
||||
case itemCharConstant:
|
||||
rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tail != "'" {
|
||||
return nil, fmt.Errorf("malformed character constant: %s", text)
|
||||
}
|
||||
n.Int64 = int64(rune)
|
||||
n.IsInt = true
|
||||
n.Uint64 = uint64(rune)
|
||||
n.IsUint = true
|
||||
n.Float64 = float64(rune) // odd but those are the rules.
|
||||
n.IsFloat = true
|
||||
return n, nil
|
||||
case itemComplex:
|
||||
// fmt.Sscan can parse the pair, so let it do the work.
|
||||
if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n.IsComplex = true
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
// Imaginary constants can only be complex unless they are zero.
|
||||
if len(text) > 0 && text[len(text)-1] == 'i' {
|
||||
f, err := strconv.ParseFloat(text[:len(text)-1], 64)
|
||||
if err == nil {
|
||||
n.IsComplex = true
|
||||
n.Complex128 = complex(0, f)
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
// Do integer test first so we get 0x123 etc.
|
||||
u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
|
||||
if err == nil {
|
||||
n.IsUint = true
|
||||
n.Uint64 = u
|
||||
}
|
||||
i, err := strconv.ParseInt(text, 0, 64)
|
||||
if err == nil {
|
||||
n.IsInt = true
|
||||
n.Int64 = i
|
||||
if i == 0 {
|
||||
n.IsUint = true // in case of -0.
|
||||
n.Uint64 = u
|
||||
}
|
||||
}
|
||||
// If an integer extraction succeeded, promote the float.
|
||||
if n.IsInt {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Int64)
|
||||
} else if n.IsUint {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Uint64)
|
||||
} else {
|
||||
f, err := strconv.ParseFloat(text, 64)
|
||||
if err == nil {
|
||||
n.IsFloat = true
|
||||
n.Float64 = f
|
||||
// If a floating-point extraction succeeded, extract the int if needed.
|
||||
if !n.IsInt && float64(int64(f)) == f {
|
||||
n.IsInt = true
|
||||
n.Int64 = int64(f)
|
||||
}
|
||||
if !n.IsUint && float64(uint64(f)) == f {
|
||||
n.IsUint = true
|
||||
n.Uint64 = uint64(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !n.IsInt && !n.IsUint && !n.IsFloat {
|
||||
return nil, fmt.Errorf("illegal number syntax: %q", text)
|
||||
}
|
||||
return n, nil
|
||||
}
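// Illustrative sketch (editorial, not from the upstream package): newNumber
// stores a constant under every representation that fits, mimicking Go's
// ideal constants. Assuming a *Tree t and a Pos pos already in hand:
//
//	n, err := t.newNumber(pos, "0x7B", itemNumber)
//	// on success: n.IsInt, n.IsUint and n.IsFloat are all true, with
//	// n.Int64 == 123, n.Uint64 == 123 and n.Float64 == 123.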
|
||||
|
||||
// simplifyComplex pulls out any other types that are represented by the complex number.
|
||||
// These all require that the imaginary part be zero.
|
||||
func (n *NumberNode) simplifyComplex() {
|
||||
n.IsFloat = imag(n.Complex128) == 0
|
||||
if n.IsFloat {
|
||||
n.Float64 = real(n.Complex128)
|
||||
n.IsInt = float64(int64(n.Float64)) == n.Float64
|
||||
if n.IsInt {
|
||||
n.Int64 = int64(n.Float64)
|
||||
}
|
||||
n.IsUint = float64(uint64(n.Float64)) == n.Float64
|
||||
if n.IsUint {
|
||||
n.Uint64 = uint64(n.Float64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NumberNode) String() string {
|
||||
return n.Text
|
||||
}
|
||||
|
||||
func (n *NumberNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NumberNode) Copy() Node {
|
||||
nn := new(NumberNode)
|
||||
*nn = *n // Easy, fast, correct.
|
||||
return nn
|
||||
}
|
||||
|
||||
// StringNode holds a string constant. The value has been "unquoted".
|
||||
type StringNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Quoted string // The original text of the string, with quotes.
|
||||
Text string // The string, after quote processing.
|
||||
}
|
||||
|
||||
func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
|
||||
return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
|
||||
}
|
||||
|
||||
func (s *StringNode) String() string {
|
||||
return s.Quoted
|
||||
}
|
||||
|
||||
func (s *StringNode) tree() *Tree {
|
||||
return s.tr
|
||||
}
|
||||
|
||||
func (s *StringNode) Copy() Node {
|
||||
return s.tr.newString(s.Pos, s.Quoted, s.Text)
|
||||
}
|
||||
|
||||
// endNode represents an {{end}} action.
|
||||
// It does not appear in the final parse tree.
|
||||
type endNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newEnd(pos Pos) *endNode {
|
||||
return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
|
||||
}
|
||||
|
||||
func (e *endNode) String() string {
|
||||
return "{{end}}"
|
||||
}
|
||||
|
||||
func (e *endNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *endNode) Copy() Node {
|
||||
return e.tr.newEnd(e.Pos)
|
||||
}
|
||||
|
||||
// elseNode represents an {{else}} action. Does not appear in the final tree.
|
||||
type elseNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
}
|
||||
|
||||
func (t *Tree) newElse(pos Pos, line int) *elseNode {
|
||||
return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
|
||||
}
|
||||
|
||||
func (e *elseNode) Type() NodeType {
|
||||
return nodeElse
|
||||
}
|
||||
|
||||
func (e *elseNode) String() string {
|
||||
return "{{else}}"
|
||||
}
|
||||
|
||||
func (e *elseNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *elseNode) Copy() Node {
|
||||
return e.tr.newElse(e.Pos, e.Line)
|
||||
}
|
||||
|
||||
// BranchNode is the common representation of if, range, and with.
|
||||
type BranchNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline to be evaluated.
|
||||
List *ListNode // What to execute if the value is non-empty.
|
||||
ElseList *ListNode // What to execute if the value is empty (nil if absent).
|
||||
}
|
||||
|
||||
func (b *BranchNode) String() string {
|
||||
name := ""
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
name = "if"
|
||||
case NodeRange:
|
||||
name = "range"
|
||||
case NodeWith:
|
||||
name = "with"
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
if b.ElseList != nil {
|
||||
return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
|
||||
}
|
||||
return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
|
||||
}
|
||||
|
||||
func (b *BranchNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BranchNode) Copy() Node {
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeRange:
|
||||
return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeWith:
|
||||
return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
}
|
||||
|
||||
// IfNode represents an {{if}} action and its commands.
|
||||
type IfNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
|
||||
return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (i *IfNode) Copy() Node {
|
||||
return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// RangeNode represents a {{range}} action and its commands.
|
||||
type RangeNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
|
||||
return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (r *RangeNode) Copy() Node {
|
||||
return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// WithNode represents a {{with}} action and its commands.
|
||||
type WithNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
|
||||
return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (w *WithNode) Copy() Node {
|
||||
return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// TemplateNode represents a {{template}} action.
|
||||
type TemplateNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Name string // The name of the template (unquoted).
|
||||
Pipe *PipeNode // The command to evaluate as dot for the template.
|
||||
}
|
||||
|
||||
func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
|
||||
return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (t *TemplateNode) String() string {
|
||||
if t.Pipe == nil {
|
||||
return fmt.Sprintf("{{template %q}}", t.Name)
|
||||
}
|
||||
return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
|
||||
}
|
||||
|
||||
func (t *TemplateNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TemplateNode) Copy() Node {
|
||||
return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
|
||||
}
|
||||
700
vendor/github.com/alecthomas/template/parse/parse.go
generated
vendored
Normal file
@@ -0,0 +1,700 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package parse builds parse trees for templates as defined by text/template
|
||||
// and html/template. Clients should use those packages to construct templates
|
||||
// rather than this one, which provides shared internal data structures not
|
||||
// intended for general use.
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tree is the representation of a single parsed template.
|
||||
type Tree struct {
|
||||
Name string // name of the template represented by the tree.
|
||||
ParseName string // name of the top-level template during parsing, for error messages.
|
||||
Root *ListNode // top-level root of the tree.
|
||||
text string // text parsed to create the template (or its parent)
|
||||
// Parsing only; cleared after parse.
|
||||
funcs []map[string]interface{}
|
||||
lex *lexer
|
||||
token [3]item // three-token lookahead for parser.
|
||||
peekCount int
|
||||
vars []string // variables defined at the moment.
|
||||
}
|
||||
|
||||
// Copy returns a copy of the Tree. Any parsing state is discarded.
|
||||
func (t *Tree) Copy() *Tree {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Tree{
|
||||
Name: t.Name,
|
||||
ParseName: t.ParseName,
|
||||
Root: t.Root.CopyList(),
|
||||
text: t.text,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns a map from template name to parse.Tree, created by parsing the
|
||||
// templates described in the argument string. The top-level template will be
|
||||
// given the specified name. If an error is encountered, parsing stops and an
|
||||
// empty map is returned with the error.
|
||||
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
|
||||
treeSet = make(map[string]*Tree)
|
||||
t := New(name)
|
||||
t.text = text
|
||||
_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
|
||||
return
|
||||
}
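// Illustrative sketch (editorial, not from the upstream package): a minimal
// call to Parse with the default delimiters; the name "page" is made up.
//
//	trees, err := Parse("page", "Hello, {{.Name}}!", "", "")
//	if err != nil {
//		// handle the parse error
//	}
//	_ = trees["page"].Root // a *ListNode: TextNode, ActionNode, TextNode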
|
||||
|
||||
// next returns the next token.
|
||||
func (t *Tree) next() item {
|
||||
if t.peekCount > 0 {
|
||||
t.peekCount--
|
||||
} else {
|
||||
t.token[0] = t.lex.nextItem()
|
||||
}
|
||||
return t.token[t.peekCount]
|
||||
}
|
||||
|
||||
// backup backs the input stream up one token.
|
||||
func (t *Tree) backup() {
|
||||
t.peekCount++
|
||||
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup2(t1 item) {
|
||||
t.token[1] = t1
|
||||
t.peekCount = 2
|
||||
}
|
||||
|
||||
// backup3 backs the input stream up three tokens
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
|
||||
t.token[1] = t1
|
||||
t.token[2] = t2
|
||||
t.peekCount = 3
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next token.
|
||||
func (t *Tree) peek() item {
|
||||
if t.peekCount > 0 {
|
||||
return t.token[t.peekCount-1]
|
||||
}
|
||||
t.peekCount = 1
|
||||
t.token[0] = t.lex.nextItem()
|
||||
return t.token[0]
|
||||
}
|
||||
|
||||
// nextNonSpace returns the next non-space token.
|
||||
func (t *Tree) nextNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// peekNonSpace returns but does not consume the next non-space token.
|
||||
func (t *Tree) peekNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
t.backup()
|
||||
return token
|
||||
}
|
||||
|
||||
// Parsing.
|
||||
|
||||
// New allocates a new parse tree with the given name.
|
||||
func New(name string, funcs ...map[string]interface{}) *Tree {
|
||||
return &Tree{
|
||||
Name: name,
|
||||
funcs: funcs,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorContext returns a textual representation of the location of the node in the input text.
|
||||
// The receiver is only used when the node does not have a pointer to the tree inside,
|
||||
// which can occur in old code.
|
||||
func (t *Tree) ErrorContext(n Node) (location, context string) {
|
||||
pos := int(n.Position())
|
||||
tree := n.tree()
|
||||
if tree == nil {
|
||||
tree = t
|
||||
}
|
||||
text := tree.text[:pos]
|
||||
byteNum := strings.LastIndex(text, "\n")
|
||||
if byteNum == -1 {
|
||||
byteNum = pos // On first line.
|
||||
} else {
|
||||
byteNum++ // After the newline.
|
||||
byteNum = pos - byteNum
|
||||
}
|
||||
lineNum := 1 + strings.Count(text, "\n")
|
||||
context = n.String()
|
||||
if len(context) > 20 {
|
||||
context = fmt.Sprintf("%.20s...", context)
|
||||
}
|
||||
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (t *Tree) errorf(format string, args ...interface{}) {
|
||||
t.Root = nil
|
||||
format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// error terminates processing.
|
||||
func (t *Tree) error(err error) {
|
||||
t.errorf("%s", err)
|
||||
}
|
||||
|
||||
// expect consumes the next token and guarantees it has the required type.
|
||||
func (t *Tree) expect(expected itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// expectOneOf consumes the next token and guarantees it has one of the required types.
|
||||
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected1 && token.typ != expected2 {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// unexpected complains about the token and terminates processing.
|
||||
func (t *Tree) unexpected(token item, context string) {
|
||||
t.errorf("unexpected %s in %s", token, context)
|
||||
}
|
||||
|
||||
// recover is the handler that turns panics into returns from the top level of Parse.
|
||||
func (t *Tree) recover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
if _, ok := e.(runtime.Error); ok {
|
||||
panic(e)
|
||||
}
|
||||
if t != nil {
|
||||
t.stopParse()
|
||||
}
|
||||
*errp = e.(error)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// startParse initializes the parser, using the lexer.
|
||||
func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
|
||||
t.Root = nil
|
||||
t.lex = lex
|
||||
t.vars = []string{"$"}
|
||||
t.funcs = funcs
|
||||
}
|
||||
|
||||
// stopParse terminates parsing.
|
||||
func (t *Tree) stopParse() {
|
||||
t.lex = nil
|
||||
t.vars = nil
|
||||
t.funcs = nil
|
||||
}
|
||||
|
||||
// Parse parses the template definition string to construct a representation of
|
||||
// the template for execution. If either action delimiter string is empty, the
|
||||
// default ("{{" or "}}") is used. Embedded template definitions are added to
|
||||
// the treeSet map.
|
||||
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
|
||||
defer t.recover(&err)
|
||||
t.ParseName = t.Name
|
||||
t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
|
||||
t.text = text
|
||||
t.parse(treeSet)
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// add adds tree to the treeSet.
|
||||
func (t *Tree) add(treeSet map[string]*Tree) {
|
||||
tree := treeSet[t.Name]
|
||||
if tree == nil || IsEmptyTree(tree.Root) {
|
||||
treeSet[t.Name] = t
|
||||
return
|
||||
}
|
||||
if !IsEmptyTree(t.Root) {
|
||||
t.errorf("template: multiple definition of template %q", t.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmptyTree reports whether this tree (node) is empty of everything but space.
|
||||
func IsEmptyTree(n Node) bool {
|
||||
switch n := n.(type) {
|
||||
case nil:
|
||||
return true
|
||||
case *ActionNode:
|
||||
case *IfNode:
|
||||
case *ListNode:
|
||||
for _, node := range n.Nodes {
|
||||
if !IsEmptyTree(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *RangeNode:
|
||||
case *TemplateNode:
|
||||
case *TextNode:
|
||||
return len(bytes.TrimSpace(n.Text)) == 0
|
||||
case *WithNode:
|
||||
default:
|
||||
panic("unknown node: " + n.String())
|
||||
}
|
||||
return false
|
||||
}
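// Illustrative sketch (editorial, not from the upstream package): IsEmptyTree
// is what lets a file holding only {{define}} blocks and whitespace coexist
// with a non-empty definition of the same top-level name:
//
//	trees, _ := Parse("t", "\n{{define \"x\"}}hi{{end}}\n", "", "")
//	// IsEmptyTree(trees["t"].Root) == true; trees["x"] holds the text "hi".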
|
||||
|
||||
// parse is the top-level parser for a template, essentially the same
|
||||
// as itemList except it also parses {{define}} actions.
|
||||
// It runs to EOF.
|
||||
func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
|
||||
t.Root = t.newList(t.peek().pos)
|
||||
for t.peek().typ != itemEOF {
|
||||
if t.peek().typ == itemLeftDelim {
|
||||
delim := t.next()
|
||||
if t.nextNonSpace().typ == itemDefine {
|
||||
newT := New("definition") // name will be updated once we know it.
|
||||
newT.text = t.text
|
||||
newT.ParseName = t.ParseName
|
||||
newT.startParse(t.funcs, t.lex)
|
||||
newT.parseDefinition(treeSet)
|
||||
continue
|
||||
}
|
||||
t.backup2(delim)
|
||||
}
|
||||
n := t.textOrAction()
|
||||
if n.Type() == nodeEnd {
|
||||
t.errorf("unexpected %s", n)
|
||||
}
|
||||
t.Root.append(n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseDefinition parses a {{define}} ... {{end}} template definition and
|
||||
// installs the definition in the treeSet map. The "define" keyword has already
|
||||
// been scanned.
|
||||
func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
|
||||
const context = "define clause"
|
||||
name := t.expectOneOf(itemString, itemRawString, context)
|
||||
var err error
|
||||
t.Name, err = strconv.Unquote(name.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
t.expect(itemRightDelim, context)
|
||||
var end Node
|
||||
t.Root, end = t.itemList()
|
||||
if end.Type() != nodeEnd {
|
||||
t.errorf("unexpected %s in %s", end, context)
|
||||
}
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
}
|
||||
|
||||
// itemList:
|
||||
// textOrAction*
|
||||
// Terminates at {{end}} or {{else}}, returned separately.
|
||||
func (t *Tree) itemList() (list *ListNode, next Node) {
|
||||
list = t.newList(t.peekNonSpace().pos)
|
||||
for t.peekNonSpace().typ != itemEOF {
|
||||
n := t.textOrAction()
|
||||
switch n.Type() {
|
||||
case nodeEnd, nodeElse:
|
||||
return list, n
|
||||
}
|
||||
list.append(n)
|
||||
}
|
||||
t.errorf("unexpected EOF")
|
||||
return
|
||||
}
|
||||
|
||||
// textOrAction:
|
||||
// text | action
|
||||
func (t *Tree) textOrAction() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElideNewline:
|
||||
return t.elideNewline()
|
||||
case itemText:
|
||||
return t.newText(token.pos, token.val)
|
||||
case itemLeftDelim:
|
||||
return t.action()
|
||||
default:
|
||||
t.unexpected(token, "input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// elideNewline:
|
||||
// Remove newlines trailing rightDelim if \\ is present.
|
||||
func (t *Tree) elideNewline() Node {
|
||||
token := t.peek()
|
||||
if token.typ != itemText {
|
||||
t.unexpected(token, "input")
|
||||
return nil
|
||||
}
|
||||
|
||||
t.next()
|
||||
stripped := strings.TrimLeft(token.val, "\n\r")
|
||||
diff := len(token.val) - len(stripped)
|
||||
if diff > 0 {
|
||||
// This is a bit nasty. We mutate the token in-place to remove
|
||||
// preceding newlines.
|
||||
token.pos += Pos(diff)
|
||||
token.val = stripped
|
||||
}
|
||||
return t.newText(token.pos, token.val)
|
||||
}
|
||||
|
||||
// Action:
|
||||
// control
|
||||
// command ("|" command)*
|
||||
// Left delim is past. Now get actions.
|
||||
// First word could be a keyword such as range.
|
||||
func (t *Tree) action() (n Node) {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElse:
|
||||
return t.elseControl()
|
||||
case itemEnd:
|
||||
return t.endControl()
|
||||
case itemIf:
|
||||
return t.ifControl()
|
||||
case itemRange:
|
||||
return t.rangeControl()
|
||||
case itemTemplate:
|
||||
return t.templateControl()
|
||||
case itemWith:
|
||||
return t.withControl()
|
||||
}
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
|
||||
}
|
||||
|
||||
// Pipeline:
|
||||
// declarations? command ('|' command)*
|
||||
func (t *Tree) pipeline(context string) (pipe *PipeNode) {
|
||||
var decl []*VariableNode
|
||||
pos := t.peekNonSpace().pos
|
||||
// Are there declarations?
|
||||
for {
|
||||
if v := t.peekNonSpace(); v.typ == itemVariable {
|
||||
t.next()
|
||||
// Since space is a token, we need 3-token look-ahead here in the worst case:
|
||||
// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
|
||||
// argument variable rather than a declaration. So remember the token
|
||||
// adjacent to the variable so we can push it back if necessary.
|
||||
tokenAfterVariable := t.peek()
|
||||
if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
|
||||
t.nextNonSpace()
|
||||
variable := t.newVariable(v.pos, v.val)
|
||||
decl = append(decl, variable)
|
||||
t.vars = append(t.vars, v.val)
|
||||
if next.typ == itemChar && next.val == "," {
|
||||
if context == "range" && len(decl) < 2 {
|
||||
continue
|
||||
}
|
||||
t.errorf("too many declarations in %s", context)
|
||||
}
|
||||
} else if tokenAfterVariable.typ == itemSpace {
|
||||
t.backup3(v, tokenAfterVariable)
|
||||
} else {
|
||||
t.backup2(v)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
|
||||
for {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemRightDelim, itemRightParen:
|
||||
if len(pipe.Cmds) == 0 {
|
||||
t.errorf("missing value for %s", context)
|
||||
}
|
||||
if token.typ == itemRightParen {
|
||||
t.backup()
|
||||
}
|
||||
return
|
||||
case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
|
||||
itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
|
||||
t.backup()
|
||||
pipe.append(t.command())
|
||||
default:
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
}
|
||||
}
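// Illustrative sketch (editorial, not from the upstream package): the 3-token
// lookahead above is what distinguishes declarations from plain variable
// arguments, for example:
//
//	{{$v := .Value}}            // one declaration
//	{{range $i, $e := .Items}}  // two declarations; only "range" allows two
//	{{$v | printf "%q"}}        // $v is an argument here, not a declaration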
|
||||
|
||||
func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
|
||||
defer t.popVars(len(t.vars))
|
||||
line = t.lex.lineNumber()
|
||||
pipe = t.pipeline(context)
|
||||
var next Node
|
||||
list, next = t.itemList()
|
||||
switch next.Type() {
|
||||
case nodeEnd: //done
|
||||
case nodeElse:
|
||||
if allowElseIf {
|
||||
// Special case for "else if". If the "else" is followed immediately by an "if",
|
||||
// the elseControl will have left the "if" token pending. Treat
|
||||
// {{if a}}_{{else if b}}_{{end}}
|
||||
// as
|
||||
// {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
|
||||
// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
|
||||
// is assumed. This technique works even for long if-else-if chains.
|
||||
// TODO: Should we allow else-if in with and range?
|
||||
if t.peek().typ == itemIf {
|
||||
t.next() // Consume the "if" token.
|
||||
elseList = t.newList(next.Position())
|
||||
elseList.append(t.ifControl())
|
||||
// Do not consume the next item - only one {{end}} required.
|
||||
break
|
||||
}
|
||||
}
|
||||
elseList, next = t.itemList()
|
||||
if next.Type() != nodeEnd {
|
||||
t.errorf("expected end; found %s", next)
|
||||
}
|
||||
}
|
||||
return pipe.Position(), line, pipe, list, elseList
|
||||
}
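// Illustrative sketch (editorial, not from the upstream package): the
// "else if" special case above means these two forms parse to the same shape,
// with only a single {{end}} written in the first:
//
//	{{if .A}}a{{else if .B}}b{{end}}
//	{{if .A}}a{{else}}{{if .B}}b{{end}}{{end}}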
|
||||
|
||||
// If:
|
||||
// {{if pipeline}} itemList {{end}}
|
||||
// {{if pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) ifControl() Node {
|
||||
return t.newIf(t.parseControl(true, "if"))
|
||||
}
|
||||
|
||||
// Range:
|
||||
// {{range pipeline}} itemList {{end}}
|
||||
// {{range pipeline}} itemList {{else}} itemList {{end}}
|
||||
// Range keyword is past.
|
||||
func (t *Tree) rangeControl() Node {
|
||||
return t.newRange(t.parseControl(false, "range"))
|
||||
}
|
||||
|
||||
// With:
|
||||
// {{with pipeline}} itemList {{end}}
|
||||
// {{with pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) withControl() Node {
|
||||
return t.newWith(t.parseControl(false, "with"))
|
||||
}
|
||||
|
||||
// End:
|
||||
// {{end}}
|
||||
// End keyword is past.
|
||||
func (t *Tree) endControl() Node {
|
||||
return t.newEnd(t.expect(itemRightDelim, "end").pos)
|
||||
}
|
||||
|
||||
// Else:
|
||||
// {{else}}
|
||||
// Else keyword is past.
|
||||
func (t *Tree) elseControl() Node {
|
||||
// Special case for "else if".
|
||||
peek := t.peekNonSpace()
|
||||
if peek.typ == itemIf {
|
||||
// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
|
||||
return t.newElse(peek.pos, t.lex.lineNumber())
|
||||
}
|
||||
return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
|
||||
}
|
||||
|
||||
// Template:
|
||||
// {{template stringValue pipeline}}
|
||||
// Template keyword is past. The name must be something that can evaluate
|
||||
// to a string.
|
||||
func (t *Tree) templateControl() Node {
|
||||
var name string
|
||||
token := t.nextNonSpace()
|
||||
switch token.typ {
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
name = s
|
||||
default:
|
||||
t.unexpected(token, "template invocation")
|
||||
}
|
||||
var pipe *PipeNode
|
||||
if t.nextNonSpace().typ != itemRightDelim {
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
pipe = t.pipeline("template")
|
||||
}
|
||||
return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
|
||||
}
|
||||
|
||||
// command:
|
||||
// operand (space operand)*
|
||||
// space-separated arguments up to a pipeline character or right delimiter.
|
||||
// we consume the pipe character but leave the right delim to terminate the action.
|
||||
func (t *Tree) command() *CommandNode {
|
||||
cmd := t.newCommand(t.peekNonSpace().pos)
|
||||
for {
|
||||
t.peekNonSpace() // skip leading spaces.
|
||||
operand := t.operand()
|
||||
if operand != nil {
|
||||
cmd.append(operand)
|
||||
}
|
||||
switch token := t.next(); token.typ {
|
||||
case itemSpace:
|
||||
continue
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemRightDelim, itemRightParen:
|
||||
t.backup()
|
||||
case itemPipe:
|
||||
default:
|
||||
t.errorf("unexpected %s in operand; missing space?", token)
|
||||
}
|
||||
break
|
||||
}
|
||||
if len(cmd.Args) == 0 {
|
||||
t.errorf("empty command")
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// operand:
|
||||
// term .Field*
|
||||
// An operand is a space-separated component of a command,
|
||||
// a term possibly followed by field accesses.
|
||||
// A nil return means the next item is not an operand.
|
||||
func (t *Tree) operand() Node {
|
||||
node := t.term()
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
if t.peek().typ == itemField {
|
||||
chain := t.newChain(t.peek().pos, node)
|
||||
for t.peek().typ == itemField {
|
||||
chain.Add(t.next().val)
|
||||
}
|
||||
// Compatibility with original API: If the term is of type NodeField
|
||||
// or NodeVariable, just put more fields on the original.
|
||||
// Otherwise, keep the Chain node.
|
||||
// TODO: Switch to Chains always when we can.
|
||||
switch node.Type() {
|
||||
case NodeField:
|
||||
node = t.newField(chain.Position(), chain.String())
|
||||
case NodeVariable:
|
||||
node = t.newVariable(chain.Position(), chain.String())
|
||||
default:
|
||||
node = chain
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// term:
|
||||
// literal (number, string, nil, boolean)
|
||||
// function (identifier)
|
||||
// .
|
||||
// .Field
|
||||
// $
|
||||
// '(' pipeline ')'
|
||||
// A term is a simple "expression".
|
||||
// A nil return means the next item is not a term.
|
||||
func (t *Tree) term() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemIdentifier:
|
||||
if !t.hasFunction(token.val) {
|
||||
t.errorf("function %q not defined", token.val)
|
||||
}
|
||||
return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
|
||||
case itemDot:
|
||||
return t.newDot(token.pos)
|
||||
case itemNil:
|
||||
return t.newNil(token.pos)
|
||||
case itemVariable:
|
||||
return t.useVar(token.pos, token.val)
|
||||
case itemField:
|
||||
return t.newField(token.pos, token.val)
|
||||
case itemBool:
|
||||
return t.newBool(token.pos, token.val == "true")
|
||||
case itemCharConstant, itemComplex, itemNumber:
|
||||
number, err := t.newNumber(token.pos, token.val, token.typ)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return number
|
||||
case itemLeftParen:
|
||||
pipe := t.pipeline("parenthesized pipeline")
|
||||
if token := t.next(); token.typ != itemRightParen {
|
||||
t.errorf("unclosed right paren: unexpected %s", token)
|
||||
}
|
||||
return pipe
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return t.newString(token.pos, token.val, s)
|
||||
}
|
||||
t.backup()
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasFunction reports if a function name exists in the Tree's maps.
|
||||
func (t *Tree) hasFunction(name string) bool {
|
||||
for _, funcMap := range t.funcs {
|
||||
if funcMap == nil {
|
||||
continue
|
||||
}
|
||||
if funcMap[name] != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// popVars trims the variable list to the specified length
|
||||
func (t *Tree) popVars(n int) {
|
||||
t.vars = t.vars[:n]
|
||||
}
|
||||
|
||||
// useVar returns a node for a variable reference. It errors if the
|
||||
// variable is not defined.
|
||||
func (t *Tree) useVar(pos Pos, name string) Node {
|
||||
v := t.newVariable(pos, name)
|
||||
for _, varName := range t.vars {
|
||||
if varName == v.Ident[0] {
|
||||
return v
|
||||
}
|
||||
}
|
||||
t.errorf("undefined variable %q", v.Ident[0])
|
||||
return nil
|
||||
}
|
||||
218
vendor/github.com/alecthomas/template/template.go
generated
vendored
Normal file
@@ -0,0 +1,218 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// common holds the information shared by related templates.
|
||||
type common struct {
|
||||
tmpl map[string]*Template
|
||||
// We use two maps, one for parsing and one for execution.
|
||||
// This separation makes the API cleaner since it doesn't
|
||||
// expose reflection to the client.
|
||||
parseFuncs FuncMap
|
||||
execFuncs map[string]reflect.Value
|
||||
}
|
||||
|
||||
// Template is the representation of a parsed template. The *parse.Tree
|
||||
// field is exported only for use by html/template and should be treated
|
||||
// as unexported by all other clients.
|
||||
type Template struct {
|
||||
name string
|
||||
*parse.Tree
|
||||
*common
|
||||
leftDelim string
|
||||
rightDelim string
|
||||
}
|
||||
|
||||
// New allocates a new template with the given name.
|
||||
func New(name string) *Template {
|
||||
return &Template{
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the template.
|
||||
func (t *Template) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
// New allocates a new template associated with the given one and with the same
|
||||
// delimiters. The association, which is transitive, allows one template to
|
||||
// invoke another with a {{template}} action.
|
||||
func (t *Template) New(name string) *Template {
|
||||
t.init()
|
||||
return &Template{
|
||||
name: name,
|
||||
common: t.common,
|
||||
leftDelim: t.leftDelim,
|
||||
rightDelim: t.rightDelim,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Template) init() {
|
||||
if t.common == nil {
|
||||
t.common = new(common)
|
||||
t.tmpl = make(map[string]*Template)
|
||||
t.parseFuncs = make(FuncMap)
|
||||
t.execFuncs = make(map[string]reflect.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the template, including all associated
|
||||
// templates. The actual representation is not copied, but the name space of
|
||||
// associated templates is, so further calls to Parse in the copy will add
|
||||
// templates to the copy but not to the original. Clone can be used to prepare
|
||||
// common templates and use them with variant definitions for other templates
|
||||
// by adding the variants after the clone is made.
|
||||
func (t *Template) Clone() (*Template, error) {
|
||||
nt := t.copy(nil)
|
||||
nt.init()
|
||||
nt.tmpl[t.name] = nt
|
||||
for k, v := range t.tmpl {
|
||||
if k == t.name { // Already installed.
|
||||
continue
|
||||
}
|
||||
// The associated templates share nt's common structure.
|
||||
tmpl := v.copy(nt.common)
|
||||
nt.tmpl[k] = tmpl
|
||||
}
|
||||
for k, v := range t.parseFuncs {
|
||||
nt.parseFuncs[k] = v
|
||||
}
|
||||
for k, v := range t.execFuncs {
|
||||
nt.execFuncs[k] = v
|
||||
}
|
||||
return nt, nil
|
||||
}
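// Illustrative sketch (editorial, not from the upstream package) of the Clone
// workflow described above; template names are made up:
//
//	base, _ := New("base").Parse(`{{template "content" .}}`)
//	variant, _ := base.Clone()
//	variant.Parse(`{{define "content"}}variant body{{end}}`)
//	// "content" is now defined in variant's namespace but not in base's.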
|
||||
|
||||
// copy returns a shallow copy of t, with common set to the argument.
|
||||
func (t *Template) copy(c *common) *Template {
|
||||
nt := New(t.name)
|
||||
nt.Tree = t.Tree
|
||||
nt.common = c
|
||||
nt.leftDelim = t.leftDelim
|
||||
nt.rightDelim = t.rightDelim
|
||||
return nt
|
||||
}
|
||||
|
||||
// AddParseTree creates a new template with the name and parse tree
|
||||
// and associates it with t.
|
||||
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
|
||||
if t.common != nil && t.tmpl[name] != nil {
|
||||
return nil, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
nt := t.New(name)
|
||||
nt.Tree = tree
|
||||
t.tmpl[name] = nt
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// Templates returns a slice of the templates associated with t, including t
|
||||
// itself.
|
||||
func (t *Template) Templates() []*Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
// Return a slice so we don't expose the map.
|
||||
m := make([]*Template, 0, len(t.tmpl))
|
||||
for _, v := range t.tmpl {
|
||||
m = append(m, v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Delims sets the action delimiters to the specified strings, to be used in
|
||||
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
|
||||
// definitions will inherit the settings. An empty delimiter stands for the
|
||||
// corresponding default: {{ or }}.
|
||||
// The return value is the template, so calls can be chained.
|
||||
func (t *Template) Delims(left, right string) *Template {
|
||||
t.leftDelim = left
|
||||
t.rightDelim = right
|
||||
return t
|
||||
}
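// Illustrative sketch (editorial, not from the upstream package): Delims must
// be called before Parse for the new delimiters to take effect:
//
//	t, err := New("t").Delims("<<", ">>").Parse("Hello <<.Name>>")
//	// "{{" and "}}" are now ordinary text in this template.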
|
||||
|
||||
// Funcs adds the elements of the argument map to the template's function map.
|
||||
// It panics if a value in the map is not a function with appropriate return
|
||||
// type. However, it is legal to overwrite elements of the map. The return
|
||||
// value is the template, so calls can be chained.
|
||||
func (t *Template) Funcs(funcMap FuncMap) *Template {
|
||||
t.init()
|
||||
addValueFuncs(t.execFuncs, funcMap)
|
||||
addFuncs(t.parseFuncs, funcMap)
|
||||
return t
|
||||
}
|
||||
|
||||
// Lookup returns the template with the given name that is associated with t,
|
||||
// or nil if there is no such template.
|
||||
func (t *Template) Lookup(name string) *Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
return t.tmpl[name]
|
||||
}
|
||||
|
||||
// Parse parses a string into a template. Nested template definitions will be
|
||||
// associated with the top-level template t. Parse may be called multiple times
|
||||
// to parse definitions of templates to associate with t. It is an error if a
|
||||
// resulting template is non-empty (contains content other than template
|
||||
// definitions) and would replace a non-empty template with the same name.
|
||||
// (In multiple calls to Parse with the same receiver template, only one call
|
||||
// can contain text other than space, comments, and template definitions.)
|
||||
func (t *Template) Parse(text string) (*Template, error) {
|
||||
t.init()
|
||||
trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add the newly parsed trees, including the one for t, into our common structure.
|
||||
for name, tree := range trees {
|
||||
// If the name we parsed is the name of this template, overwrite this template.
|
||||
// The associate method checks it's not a redefinition.
|
||||
tmpl := t
|
||||
if name != t.name {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
// Even if t == tmpl, we need to install it in the common.tmpl map.
|
||||
if replace, err := t.associate(tmpl, tree); err != nil {
|
||||
return nil, err
|
||||
} else if replace {
|
||||
tmpl.Tree = tree
|
||||
}
|
||||
tmpl.leftDelim = t.leftDelim
|
||||
tmpl.rightDelim = t.rightDelim
|
||||
}
|
||||
return t, nil
|
||||
}
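// Illustrative sketch (editorial, not from the upstream package): successive
// Parse calls on the same receiver accumulate definitions:
//
//	t := New("root")
//	t, _ = t.Parse(`{{define "greet"}}hi{{end}}`)
//	t, _ = t.Parse(`{{template "greet" .}} there`)
//	// "root" and "greet" now share one associated set of templates.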
|
||||
|
||||
// associate installs the new template into the group of templates associated
|
||||
// with t. It is an error to reuse a name except to overwrite an empty
|
||||
// template. The two are already known to share the common structure.
|
||||
// The boolean return value reports whether to store this tree as t.Tree.
|
||||
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
|
||||
if new.common != t.common {
|
||||
panic("internal error: associate not common")
|
||||
}
|
||||
name := new.name
|
||||
if old := t.tmpl[name]; old != nil {
|
||||
oldIsEmpty := parse.IsEmptyTree(old.Root)
|
||||
newIsEmpty := parse.IsEmptyTree(tree.Root)
|
||||
if newIsEmpty {
|
||||
// Whether old is empty or not, new is empty; no reason to replace old.
|
||||
return false, nil
|
||||
}
|
||||
if !oldIsEmpty {
|
||||
return false, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
}
|
||||
t.tmpl[name] = new
|
||||
return true, nil
|
||||
}
|
||||
19
vendor/github.com/alecthomas/units/COPYING
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014 Alec Thomas
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
83
vendor/github.com/alecthomas/units/bytes.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
package units
|
||||
|
||||
// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
|
||||
// etc.).
|
||||
type Base2Bytes int64
|
||||
|
||||
// Base-2 byte units.
|
||||
const (
|
||||
Kibibyte Base2Bytes = 1024
|
||||
KiB = Kibibyte
|
||||
Mebibyte = Kibibyte * 1024
|
||||
MiB = Mebibyte
|
||||
Gibibyte = Mebibyte * 1024
|
||||
GiB = Gibibyte
|
||||
Tebibyte = Gibibyte * 1024
|
||||
TiB = Tebibyte
|
||||
Pebibyte = Tebibyte * 1024
|
||||
PiB = Pebibyte
|
||||
Exbibyte = Pebibyte * 1024
|
||||
EiB = Exbibyte
|
||||
)
|
||||
|
||||
var (
|
||||
bytesUnitMap = MakeUnitMap("iB", "B", 1024)
|
||||
oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
|
||||
)
|
||||
|
||||
// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
|
||||
// and KiB are both 1024.
|
||||
func ParseBase2Bytes(s string) (Base2Bytes, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, oldBytesUnitMap)
|
||||
}
|
||||
return Base2Bytes(n), err
|
||||
}
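// Illustrative sketch (editorial, not from the upstream package):
//
//	n, _ := ParseBase2Bytes("1KiB") // n == 1024
//	m, _ := ParseBase2Bytes("1KB")  // m == 1024 as well, per the note above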
|
||||
|
||||
func (b Base2Bytes) String() string {
|
||||
return ToString(int64(b), 1024, "iB", "B")
|
||||
}
|
||||
|
||||
var (
|
||||
metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
|
||||
)
|
||||
|
||||
// MetricBytes are SI byte units (1000 bytes in a kilobyte).
|
||||
type MetricBytes SI
|
||||
|
||||
// SI base-10 byte units.
|
||||
const (
|
||||
Kilobyte MetricBytes = 1000
|
||||
KB = Kilobyte
|
||||
Megabyte = Kilobyte * 1000
|
||||
MB = Megabyte
|
||||
Gigabyte = Megabyte * 1000
|
||||
GB = Gigabyte
|
||||
Terabyte = Gigabyte * 1000
|
||||
TB = Terabyte
|
||||
Petabyte = Terabyte * 1000
|
||||
PB = Petabyte
|
||||
Exabyte = Petabyte * 1000
|
||||
EB = Exabyte
|
||||
)
|
||||
|
||||
// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
|
||||
func ParseMetricBytes(s string) (MetricBytes, error) {
|
||||
n, err := ParseUnit(s, metricBytesUnitMap)
|
||||
return MetricBytes(n), err
|
||||
}
|
||||
|
||||
func (m MetricBytes) String() string {
|
||||
return ToString(int64(m), 1000, "B", "B")
|
||||
}
|
||||
|
||||
// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
|
||||
// respectively. That is, KiB represents 1024 and KB represents 1000.
|
||||
func ParseStrictBytes(s string) (int64, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, metricBytesUnitMap)
|
||||
}
|
||||
return int64(n), err
|
||||
}
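// Illustrative sketch (editorial, not from the upstream package):
//
//	a, _ := ParseStrictBytes("5MiB") // a == 5 * 1024 * 1024
//	b, _ := ParseStrictBytes("5MB")  // b == 5 * 1000 * 1000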
|
||||
13
vendor/github.com/alecthomas/units/doc.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
// Package units provides helpful unit multipliers and functions for Go.
//
// The goal of this package is to have functionality similar to the time [1] package.
//
//
// [1] http://golang.org/pkg/time/
//
// It allows for code like this:
//
//	n, err := ParseBase2Bytes("1KB")
//	// n == 1024
//	n = units.Mebibyte * 512
package units
26
vendor/github.com/alecthomas/units/si.go
generated
vendored
Normal file
26
vendor/github.com/alecthomas/units/si.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
package units

// SI units.
type SI int64

// SI unit multiples.
const (
    Kilo SI = 1000
    Mega = Kilo * 1000
    Giga = Mega * 1000
    Tera = Giga * 1000
    Peta = Tera * 1000
    Exa = Peta * 1000
)

func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
    return map[string]float64{
        shortSuffix:  1,
        "K" + suffix: float64(scale),
        "M" + suffix: float64(scale * scale),
        "G" + suffix: float64(scale * scale * scale),
        "T" + suffix: float64(scale * scale * scale * scale),
        "P" + suffix: float64(scale * scale * scale * scale * scale),
        "E" + suffix: float64(scale * scale * scale * scale * scale * scale),
    }
}
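As a quick illustration of how MakeUnitMap is meant to be combined with ParseUnit (defined in util.go below), the following hedged sketch builds a base-2 suffix table and parses a value against it. It again assumes the vendored import path github.com/alecthomas/units.

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// A base-2 table: bare "B" maps to 1, "KiB" to 1024, "MiB" to 1024*1024, etc.
	table := units.MakeUnitMap("iB", "B", 1024)

	// ParseUnit accepts a sequence of number+suffix components and sums them.
	n, err := units.ParseUnit("2MiB512KiB", table)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 2621440
}
```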
138 vendor/github.com/alecthomas/units/util.go generated vendored Normal file
@@ -0,0 +1,138 @@
|
||||
package units
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
|
||||
)
|
||||
|
||||
func ToString(n int64, scale int64, suffix, baseSuffix string) string {
|
||||
mn := len(siUnits)
|
||||
out := make([]string, mn)
|
||||
for i, m := range siUnits {
|
||||
if n%scale != 0 || i == 0 && n == 0 {
|
||||
s := suffix
|
||||
if i == 0 {
|
||||
s = baseSuffix
|
||||
}
|
||||
out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
|
||||
}
|
||||
n /= scale
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return strings.Join(out, "")
|
||||
}
|
||||
|
||||
// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
|
||||
var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
|
||||
|
||||
// leadingInt consumes the leading [0-9]* from s.
|
||||
func leadingInt(s string) (x int64, rem string, err error) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
if x >= (1<<63-10)/10 {
|
||||
// overflow
|
||||
return 0, "", errLeadingInt
|
||||
}
|
||||
x = x*10 + int64(c) - '0'
|
||||
}
|
||||
return x, s[i:], nil
|
||||
}
|
||||
|
||||
func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
|
||||
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
|
||||
orig := s
|
||||
f := float64(0)
|
||||
neg := false
|
||||
|
||||
// Consume [-+]?
|
||||
if s != "" {
|
||||
c := s[0]
|
||||
if c == '-' || c == '+' {
|
||||
neg = c == '-'
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
// Special case: if all that is left is "0", this is zero.
|
||||
if s == "0" {
|
||||
return 0, nil
|
||||
}
|
||||
if s == "" {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
for s != "" {
|
||||
g := float64(0) // this element of the sequence
|
||||
|
||||
var x int64
|
||||
var err error
|
||||
|
||||
// The next character must be [0-9.]
|
||||
if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
// Consume [0-9]*
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
g = float64(x)
|
||||
pre := pl != len(s) // whether we consumed anything before a period
|
||||
|
||||
// Consume (\.[0-9]*)?
|
||||
post := false
|
||||
if s != "" && s[0] == '.' {
|
||||
s = s[1:]
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
scale := 1.0
|
||||
for n := pl - len(s); n > 0; n-- {
|
||||
scale *= 10
|
||||
}
|
||||
g += float64(x) / scale
|
||||
post = pl != len(s)
|
||||
}
|
||||
if !pre && !post {
|
||||
// no digits (e.g. ".s" or "-.s")
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
|
||||
// Consume unit.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c == '.' || ('0' <= c && c <= '9') {
|
||||
break
|
||||
}
|
||||
}
|
||||
u := s[:i]
|
||||
s = s[i:]
|
||||
unit, ok := unitMap[u]
|
||||
if !ok {
|
||||
return 0, errors.New("units: unknown unit " + u + " in " + orig)
|
||||
}
|
||||
|
||||
f += g * unit
|
||||
}
|
||||
|
||||
if neg {
|
||||
f = -f
|
||||
}
|
||||
if f < float64(-1<<63) || f > float64(1<<63-1) {
|
||||
return 0, errors.New("units: overflow parsing unit")
|
||||
}
|
||||
return int64(f), nil
|
||||
}
|
||||
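To show the round trip, here is a small sketch of ToString from util.go above turning a raw count back into suffixed form. The expected outputs are worked out from the code shown, not from running the vendored tests.

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// 1536 bytes = 1*1024 + 512, so the base-2 rendering stacks two components.
	fmt.Println(units.ToString(1536, 1024, "iB", "B")) // "1KiB512B"

	// The Base2Bytes String method uses the same helper.
	fmt.Println(units.Base2Bytes(2048).String()) // "2KiB"
}
```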
2388 vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored
File diff suppressed because it is too large
49 vendor/github.com/go-ole/go-ole/ChangeLog.md generated vendored
@@ -1,49 +0,0 @@
|
||||
# Version 1.x.x
|
||||
|
||||
* **Add more test cases and reference new test COM server project.** (Placeholder for future additions)
|
||||
|
||||
# Version 1.2.0-alphaX
|
||||
|
||||
**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.**
|
||||
|
||||
* Added CI configuration for Travis-CI and AppVeyor.
|
||||
* Added test InterfaceID and ClassID for the COM Test Server project.
|
||||
* Added more inline documentation (#83).
|
||||
* Added IEnumVARIANT implementation (#88).
|
||||
* Added IEnumVARIANT test cases (#99, #100, #101).
|
||||
* Added support for retrieving `time.Time` from VARIANT (#92).
|
||||
* Added test case for IUnknown (#64).
|
||||
* Added test case for IDispatch (#64).
|
||||
* Added test cases for scalar variants (#64, #76).
|
||||
|
||||
# Version 1.1.1
|
||||
|
||||
* Fixes for Linux build.
|
||||
* Fixes for Windows build.
|
||||
|
||||
# Version 1.1.0
|
||||
|
||||
The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes.
|
||||
|
||||
* Move GUID out of variables.go into its own file to make new documentation available.
|
||||
* Move OleError out of ole.go into its own file to make new documentation available.
|
||||
* Add documentation to utility functions.
|
||||
* Add documentation to variant receiver functions.
|
||||
* Add documentation to ole structures.
|
||||
* Make variant available to other systems outside of Windows.
|
||||
* Make OLE structures available to other systems outside of Windows.
|
||||
|
||||
## New Features
|
||||
|
||||
* Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows.
|
||||
* More functions are now documented and available on godoc.org.
|
||||
|
||||
# Version 1.0.1
|
||||
|
||||
1. Fix package references from repository location change.
|
||||
|
||||
# Version 1.0.0
|
||||
|
||||
This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface.
|
||||
|
||||
There is no changelog for this version. Check commits for history.
|
||||
21 vendor/github.com/go-ole/go-ole/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright © 2013-2017 Yasuhiro Matsumoto, <mattn.jp@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the “Software”), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
46 vendor/github.com/go-ole/go-ole/README.md generated vendored
@@ -1,46 +0,0 @@
|
||||
# Go OLE
|
||||
|
||||
[](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28)
|
||||
[](https://travis-ci.org/go-ole/go-ole)
|
||||
[](https://godoc.org/github.com/go-ole/go-ole)
|
||||
|
||||
Go bindings for Windows COM using shared libraries instead of cgo.
|
||||
|
||||
By Yasuhiro Matsumoto.
|
||||
|
||||
## Install
|
||||
|
||||
To experiment with go-ole, you can just compile and run the example program:
|
||||
|
||||
```
|
||||
go get github.com/go-ole/go-ole
|
||||
cd /path/to/go-ole/
|
||||
go test
|
||||
|
||||
cd /path/to/go-ole/example/excel
|
||||
go run excel.go
|
||||
```
|
||||
|
||||
## Continuous Integration
|
||||
|
||||
Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run.
|
||||
|
||||
**Travis-CI**
|
||||
|
||||
Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server.
|
||||
|
||||
**AppVeyor**
|
||||
|
||||
AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server.
|
||||
|
||||
The tests currently do run and do pass and this should be maintained with commits.
|
||||
|
||||
##Versioning
|
||||
|
||||
Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch.
|
||||
|
||||
This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed.
|
||||
|
||||
##LICENSE
|
||||
|
||||
Under the MIT License: http://mattn.mit-license.org/2013
|
||||
63 vendor/github.com/go-ole/go-ole/appveyor.yml generated vendored
@@ -1,63 +0,0 @@
|
||||
# Notes:
|
||||
# - Minimal appveyor.yml file is an empty file. All sections are optional.
|
||||
# - Indent each level of configuration with 2 spaces. Do not use tabs!
|
||||
# - All section names are case-sensitive.
|
||||
# - Section names should be unique on each level.
|
||||
|
||||
version: "1.3.0.{build}-alpha-{branch}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- v1.2
|
||||
- v1.1
|
||||
- v1.0
|
||||
|
||||
skip_tags: true
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\go-ole\go-ole
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
matrix:
|
||||
- GOARCH: amd64
|
||||
GOVERSION: 1.4
|
||||
GOROOT: c:\go
|
||||
DOWNLOADPLATFORM: "x64"
|
||||
|
||||
install:
|
||||
- choco install mingw
|
||||
- SET PATH=c:\tools\mingw64\bin;%PATH%
|
||||
# - Download COM Server
|
||||
- ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip"
|
||||
- 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL
|
||||
- c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat
|
||||
# - set
|
||||
- go version
|
||||
- go env
|
||||
- c:\gopath\src\github.com\go-ole\go-ole\build\compile-go.bat
|
||||
- go tool dist install -v cmd/8a
|
||||
- go tool dist install -v cmd/8c
|
||||
- go tool dist install -v cmd/8g
|
||||
- go tool dist install -v cmd/8l
|
||||
- go tool dist install -v cmd/6a
|
||||
- go tool dist install -v cmd/6c
|
||||
- go tool dist install -v cmd/6g
|
||||
- go tool dist install -v cmd/6l
|
||||
- go get -u golang.org/x/tools/cmd/cover
|
||||
- go get -u golang.org/x/tools/cmd/godoc
|
||||
- go get -u golang.org/x/tools/cmd/stringer
|
||||
|
||||
build_script:
|
||||
- cd c:\gopath\src\github.com\go-ole\go-ole
|
||||
- go get -v -t ./...
|
||||
- go build
|
||||
- go test -v -cover ./...
|
||||
|
||||
# disable automatic tests
|
||||
test: off
|
||||
|
||||
# disable deployment
|
||||
deploy: off
|
||||
4 vendor/github.com/go-ole/go-ole/com.go generated vendored
@@ -321,9 +321,9 @@ func DispatchMessage(msg *Msg) (ret int32) {
|
||||
// GetVariantDate converts COM Variant Time value to Go time.Time.
|
||||
func GetVariantDate(value float64) (time.Time, error) {
|
||||
var st syscall.Systemtime
|
||||
r, _, _ := procVariantTimeToSystemTime.Call(uintptr(unsafe.Pointer(&value)), uintptr(unsafe.Pointer(&st)))
|
||||
r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st)))
|
||||
if r != 0 {
|
||||
return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), nil), nil
|
||||
return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
|
||||
}
|
||||
return time.Now(), errors.New("Could not convert to time, passing current time.")
|
||||
}
|
||||
|
||||
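The second half of the hunk above swaps a nil *time.Location for time.UTC. As a reminder of why that matters, the short standalone sketch below (standard library only, unrelated to COM) shows that time.Date needs a concrete location.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Date panics if the final *Location argument is nil, so the converted
	// SYSTEMTIME in GetVariantDate has to pick a concrete zone; UTC is the
	// neutral choice for a COM variant date.
	t := time.Date(2018, time.March, 1, 12, 30, 0, 0, time.UTC)
	fmt.Println(t.Format(time.RFC3339)) // 2018-03-01T12:30:00Z
}
```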
4 vendor/github.com/go-ole/go-ole/idispatch_windows.go generated vendored
@@ -63,6 +63,10 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}
|
||||
dispnames := [1]int32{DISPID_PROPERTYPUT}
|
||||
dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0]))
|
||||
dispparams.cNamedArgs = 1
|
||||
} else if dispatch&DISPATCH_PROPERTYPUTREF != 0 {
|
||||
dispnames := [1]int32{DISPID_PROPERTYPUT}
|
||||
dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0]))
|
||||
dispparams.cNamedArgs = 1
|
||||
}
|
||||
var vargs []VARIANT
|
||||
if len(params) > 0 {
|
||||
|
||||
10 vendor/github.com/go-ole/go-ole/ole.go generated vendored
@@ -26,6 +26,16 @@ type EXCEPINFO struct {
|
||||
scode uint32
|
||||
}
|
||||
|
||||
// WCode return wCode in EXCEPINFO.
|
||||
func (e EXCEPINFO) WCode() uint16 {
|
||||
return e.wCode
|
||||
}
|
||||
|
||||
// SCODE return scode in EXCEPINFO.
|
||||
func (e EXCEPINFO) SCODE() uint32 {
|
||||
return e.scode
|
||||
}
|
||||
|
||||
// String convert EXCEPINFO to string.
|
||||
func (e EXCEPINFO) String() string {
|
||||
var src, desc, hlp string
|
||||
|
||||
1 vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go generated vendored
@@ -49,6 +49,7 @@ func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cooki
|
||||
point.Release()
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
container.Release()
|
||||
|
||||
38 vendor/github.com/go-ole/go-ole/oleutil/oleutil.go generated vendored
@@ -87,3 +87,41 @@ func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (r
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// PutPropertyRef mutates property reference.
|
||||
func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
|
||||
return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params)
|
||||
}
|
||||
|
||||
// MustPutPropertyRef mutates property reference or panics.
|
||||
func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
|
||||
r, err := PutPropertyRef(disp, name, params...)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error {
|
||||
newEnum, err := disp.GetProperty("_NewEnum")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer newEnum.Clear()
|
||||
|
||||
enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer enum.Release()
|
||||
|
||||
for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ferr := f(&item); ferr != nil {
|
||||
return ferr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
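The new ForEach helper above walks an IEnumVARIANT-backed collection. A hedged sketch of how a caller might use it follows; it assumes COM has already been initialised and that `collection` is an *ole.IDispatch for some enumerable COM object, neither of which this diff provides.

```go
package example

import (
	ole "github.com/go-ole/go-ole"
	"github.com/go-ole/go-ole/oleutil"
)

// countItems walks a COM collection with oleutil.ForEach and counts its
// elements. Purely illustrative: COM setup and teardown are the caller's
// responsibility.
func countItems(collection *ole.IDispatch) (int, error) {
	n := 0
	err := oleutil.ForEach(collection, func(v *ole.VARIANT) error {
		defer v.Clear()
		n++
		return nil
	})
	return n, err
}
```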
3 vendor/github.com/golang/protobuf/AUTHORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3 vendor/github.com/golang/protobuf/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
43 vendor/github.com/golang/protobuf/proto/Makefile generated vendored
@@ -1,43 +0,0 @@
|
||||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
install:
|
||||
go install
|
||||
|
||||
test: install generate-test-pbs
|
||||
go test
|
||||
|
||||
|
||||
generate-test-pbs:
|
||||
make install
|
||||
make -C testdata
|
||||
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
|
||||
make
|
||||
151 vendor/github.com/golang/protobuf/proto/discard.go generated vendored Normal file
@@ -0,0 +1,151 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||
// of such fields are preserved in the Message. This allows a later call to
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
discardLegacy(m)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, ok := extendable(m); ok {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
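A hedged sketch of the intended call pattern for DiscardUnknown follows: unmarshal, then drop unknown fields so a later Marshal does not re-emit them. The message type pb.Envelope and its import path are hypothetical placeholders, not part of this repository.

```go
package example

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.invalid/gen/pb" // hypothetical generated package
)

func decodeWithoutUnknowns(raw []byte) *pb.Envelope {
	msg := &pb.Envelope{}
	if err := proto.Unmarshal(raw, msg); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	// Unknown fields captured during Unmarshal stay on the message by default;
	// DiscardUnknown clears them recursively.
	proto.DiscardUnknown(msg)
	return msg
}
```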
4 vendor/github.com/golang/protobuf/proto/encode.go generated vendored
@@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag64(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
|
||||
}
|
||||
|
||||
func sizeZigzag64(x uint64) int {
|
||||
return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
|
||||
}
|
||||
|
||||
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
|
||||
|
||||
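The hunk above only drops a redundant uint64 conversion; the zigzag mapping itself is unchanged. For reference, a standalone sketch of that mapping (not the library's exported API) is:

```go
package main

import "fmt"

// zigzag64 maps signed integers onto unsigned ones so small negative values
// stay small on the wire: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
func zigzag64(x int64) uint64 {
	return uint64(x<<1) ^ uint64(x>>63)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%d -> %d\n", v, zigzag64(v))
	}
}
```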
1 vendor/github.com/golang/protobuf/proto/lib.go generated vendored
@@ -73,7 +73,6 @@ for a protocol buffer variable v:
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Getters are only generated for message and oneof fields.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
|
||||
2 vendor/github.com/golang/protobuf/proto/text_parser.go generated vendored
@@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
|
||||
7 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile generated vendored
@@ -1,7 +0,0 @@
all:

cover:
	go test -cover -v -coverprofile=cover.dat ./...
	go tool cover -func cover.dat

.PHONY: cover
18 vendor/github.com/prometheus/client_golang/AUTHORS.md generated vendored Normal file
@@ -0,0 +1,18 @@
|
||||
The Prometheus project was started by Matt T. Proud (emeritus) and
|
||||
Julius Volz in 2012.
|
||||
|
||||
Maintainers of this repository:
|
||||
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Bernerd Schaefer <bj.schaefer@gmail.com>
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
* Daniel Bornkessel <daniel@soundcloud.com>
|
||||
* Jeff Younker <jeff@drinktomi.com>
|
||||
* Julius Volz <julius.volz@gmail.com>
|
||||
* Matt T. Proud <matt.proud@gmail.com>
|
||||
* Tobias Schmidt <ts@soundcloud.com>
|
||||
|
||||
1 vendor/github.com/prometheus/client_golang/prometheus/README.md generated vendored
@@ -1 +0,0 @@
See [](https://godoc.org/github.com/prometheus/client_golang/prometheus).
49 vendor/github.com/prometheus/client_golang/prometheus/http.go generated vendored
@@ -267,7 +267,12 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
|
||||
now := time.Now()
|
||||
|
||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
||||
out := computeApproximateRequestSize(r)
|
||||
out := make(chan int)
|
||||
urlLen := 0
|
||||
if r.URL != nil {
|
||||
urlLen = len(r.URL.String())
|
||||
}
|
||||
go computeApproximateRequestSize(r, out, urlLen)
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
@@ -292,37 +297,23 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
|
||||
})
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request) <-chan int {
|
||||
// Get URL length in current go routine for avoiding a race condition.
|
||||
// HandlerFunc that runs in parallel may modify the URL.
|
||||
s := 0
|
||||
if r.URL != nil {
|
||||
s += len(r.URL.String())
|
||||
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
out := make(chan int, 1)
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
go func() {
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
out <- s
|
||||
close(out)
|
||||
}()
|
||||
|
||||
return out
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
out <- s
|
||||
}
|
||||
|
||||
type responseWriterDelegator struct {
|
||||
|
||||
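The rewritten computeApproximateRequestSize above reads the URL length before handing off to a goroutine, because a handler running concurrently may mutate r.URL. A stripped-down sketch of that pattern (not the exported library code) looks like this:

```go
package example

import "net/http"

// approximateSize estimates the wire size of a request. The URL is read on the
// calling goroutine, before the handler can touch it; the rest is summed in a
// worker goroutine and reported over a buffered channel.
func approximateSize(r *http.Request) <-chan int {
	out := make(chan int, 1)
	urlLen := 0
	if r.URL != nil {
		urlLen = len(r.URL.String())
	}
	go func() {
		s := urlLen + len(r.Method) + len(r.Proto) + len(r.Host)
		for name, values := range r.Header {
			s += len(name)
			for _, v := range values {
				s += len(v)
			}
		}
		if r.ContentLength != -1 {
			s += int(r.ContentLength)
		}
		out <- s
	}()
	return out
}
```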
201 vendor/github.com/prometheus/client_model/ruby/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
47 vendor/github.com/prometheus/common/expfmt/decode.go generated vendored
@@ -31,6 +31,7 @@ type Decoder interface {
|
||||
Decode(*dto.MetricFamily) error
|
||||
}
|
||||
|
||||
// DecodeOptions contains options used by the Decoder and in sample extraction.
|
||||
type DecodeOptions struct {
|
||||
// Timestamp is added to each value from the stream that has no explicit timestamp set.
|
||||
Timestamp model.Time
|
||||
@@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SampleDecoder wraps a Decoder to extract samples from the metric families
|
||||
// decoded by the wrapped Decoder.
|
||||
type SampleDecoder struct {
|
||||
Dec Decoder
|
||||
Opts *DecodeOptions
|
||||
@@ -149,37 +152,51 @@ type SampleDecoder struct {
|
||||
f dto.MetricFamily
|
||||
}
|
||||
|
||||
// Decode calls the Decode method of the wrapped Decoder and then extracts the
|
||||
// samples from the decoded MetricFamily into the provided model.Vector.
|
||||
func (sd *SampleDecoder) Decode(s *model.Vector) error {
|
||||
if err := sd.Dec.Decode(&sd.f); err != nil {
|
||||
err := sd.Dec.Decode(&sd.f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*s = extractSamples(&sd.f, sd.Opts)
|
||||
return nil
|
||||
*s, err = extractSamples(&sd.f, sd.Opts)
|
||||
return err
|
||||
}
|
||||
|
||||
// Extract samples builds a slice of samples from the provided metric families.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
|
||||
var all model.Vector
|
||||
// ExtractSamples builds a slice of samples from the provided metric
|
||||
// families. If an error occurs during sample extraction, it continues to
|
||||
// extract from the remaining metric families. The returned error is the last
|
||||
// error that has occurred.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
|
||||
var (
|
||||
all model.Vector
|
||||
lastErr error
|
||||
)
|
||||
for _, f := range fams {
|
||||
all = append(all, extractSamples(f, o)...)
|
||||
some, err := extractSamples(f, o)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
all = append(all, some...)
|
||||
}
|
||||
return all
|
||||
return all, lastErr
|
||||
}
|
||||
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
|
||||
switch f.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
return extractCounter(o, f)
|
||||
return extractCounter(o, f), nil
|
||||
case dto.MetricType_GAUGE:
|
||||
return extractGauge(o, f)
|
||||
return extractGauge(o, f), nil
|
||||
case dto.MetricType_SUMMARY:
|
||||
return extractSummary(o, f)
|
||||
return extractSummary(o, f), nil
|
||||
case dto.MetricType_UNTYPED:
|
||||
return extractUntyped(o, f)
|
||||
return extractUntyped(o, f), nil
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
return extractHistogram(o, f)
|
||||
return extractHistogram(o, f), nil
|
||||
}
|
||||
panic("expfmt.extractSamples: unknown metric family type")
|
||||
return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
|
||||
}
|
||||
|
||||
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
|
||||
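Since ExtractSamples now returns an error instead of panicking on an unknown metric family type, callers need the two-value form. A hedged sketch of decoding one family from a text exposition and extracting its samples:

```go
package example

import (
	"fmt"
	"io"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func printSamples(r io.Reader) error {
	dec := expfmt.NewDecoder(r, expfmt.FmtText)
	var mf dto.MetricFamily
	if err := dec.Decode(&mf); err != nil {
		return err
	}
	samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, &mf)
	if err != nil {
		// Extraction keeps going past bad families; this is only the last error.
		fmt.Println("extract:", err)
	}
	for _, s := range samples {
		fmt.Println(s.Metric, s.Value)
	}
	return nil
}
```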
12 vendor/github.com/prometheus/common/expfmt/expfmt.go generated vendored
@@ -11,27 +11,25 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A package for reading and writing Prometheus metrics.
|
||||
// Package expfmt contains tools for reading and writing Prometheus metrics.
|
||||
package expfmt
|
||||
|
||||
// Format specifies the HTTP content type of the different wire protocols.
|
||||
type Format string
|
||||
|
||||
// Constants to assemble the Content-Type values for the different wire protocols.
|
||||
const (
|
||||
TextVersion = "0.0.4"
|
||||
|
||||
TextVersion = "0.0.4"
|
||||
ProtoType = `application/vnd.google.protobuf`
|
||||
ProtoProtocol = `io.prometheus.client.MetricFamily`
|
||||
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
|
||||
// The Content-Type values for the different wire protocols.
|
||||
FmtUnknown Format = `<unknown>`
|
||||
FmtText Format = `text/plain; version=` + TextVersion
|
||||
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
|
||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||
|
||||
// fmtJSON2 is hidden as it is deprecated.
|
||||
fmtJSON2 Format = `application/json; version=0.0.2`
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
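The FmtText constant above now carries an explicit charset. A hedged sketch of reusing it when serving the text format, assuming `mf` is a populated *dto.MetricFamily supplied by the caller:

```go
package example

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func writeTextFormat(w http.ResponseWriter, mf *dto.MetricFamily) error {
	// Reuse the constant so the Content-Type (and charset) stays in sync with
	// the library rather than being hand-written.
	w.Header().Set("Content-Type", string(expfmt.FmtText))
	enc := expfmt.NewEncoder(w, expfmt.FmtText)
	return enc.Encode(mf)
}
```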
4 vendor/github.com/prometheus/common/expfmt/text_parse.go generated vendored
@@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
|
||||
if p.readTokenAsLabelValue(); p.err != nil {
|
||||
return nil
|
||||
}
|
||||
if !model.LabelValue(p.currentToken.String()).IsValid() {
|
||||
p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
p.currentLabelPair.Value = proto.String(p.currentToken.String())
|
||||
// Special treatment of summaries:
|
||||
// - Quantile labels are special, will result in dto.Quantile later.
|
||||
|
||||
67 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt generated vendored
@@ -1,67 +0,0 @@
|
||||
PACKAGE
|
||||
|
||||
package goautoneg
|
||||
import "bitbucket.org/ww/goautoneg"
|
||||
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
FUNCTIONS
|
||||
|
||||
func Negotiate(header string, alternatives []string) (content_type string)
|
||||
Negotiate the most appropriate content_type given the accept header
|
||||
and a list of alternatives.
|
||||
|
||||
func ParseAccept(header string) (accept []Accept)
|
||||
Parse an Accept Header string returning a sorted list
|
||||
of clauses
|
||||
|
||||
|
||||
TYPES
|
||||
|
||||
type Accept struct {
|
||||
Type, SubType string
|
||||
Q float32
|
||||
Params map[string]string
|
||||
}
|
||||
Structure to represent a clause in an HTTP Accept Header
|
||||
|
||||
|
||||
SUBDIRECTORIES
|
||||
|
||||
.hg
|
||||
10 vendor/github.com/prometheus/common/log/eventlog_formatter.go generated vendored
@@ -21,22 +21,22 @@ import (
|
||||
|
||||
"golang.org/x/sys/windows/svc/eventlog"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
setEventlogFormatter = func(name string, debugAsInfo bool) error {
|
||||
setEventlogFormatter = func(l logger, name string, debugAsInfo bool) error {
|
||||
if name == "" {
|
||||
return fmt.Errorf("missing name parameter")
|
||||
}
|
||||
|
||||
fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
|
||||
fmter, err := newEventlogger(name, debugAsInfo, l.entry.Logger.Formatter)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
|
||||
origLogger.Errorf("can't connect logger to eventlog: %v", err)
|
||||
l.Errorf("can't connect logger to eventlog: %v", err)
|
||||
return err
|
||||
}
|
||||
origLogger.Formatter = fmter
|
||||
l.entry.Logger.Formatter = fmter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
167 vendor/github.com/prometheus/common/log/log.go generated vendored
@@ -14,7 +14,6 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -25,106 +24,46 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
type levelFlag string
|
||||
|
||||
// String implements flag.Value.
|
||||
func (f levelFlag) String() string {
|
||||
return fmt.Sprintf("%q", string(f))
|
||||
}
|
||||
|
||||
// Set implements flag.Value.
|
||||
func (f levelFlag) Set(level string) error {
|
||||
l, err := logrus.ParseLevel(level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
origLogger.Level = l
|
||||
return nil
|
||||
}
|
||||
|
||||
// setSyslogFormatter is nil if the target architecture does not support syslog.
|
||||
var setSyslogFormatter func(string, string) error
|
||||
var setSyslogFormatter func(logger, string, string) error
|
||||
|
||||
// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
|
||||
var setEventlogFormatter func(string, bool) error
|
||||
var setEventlogFormatter func(logger, string, bool) error
|
||||
|
||||
func setJSONFormatter() {
|
||||
origLogger.Formatter = &logrus.JSONFormatter{}
|
||||
}
|
||||
|
||||
type logFormatFlag url.URL
|
||||
|
||||
// String implements flag.Value.
|
||||
func (f logFormatFlag) String() string {
|
||||
u := url.URL(f)
|
||||
return fmt.Sprintf("%q", u.String())
|
||||
type loggerSettings struct {
|
||||
level string
|
||||
format string
|
||||
}
|
||||
|
||||
// Set implements flag.Value.
|
||||
func (f logFormatFlag) Set(format string) error {
|
||||
u, err := url.Parse(format)
|
||||
func (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {
|
||||
err := baseLogger.SetLevel(s.level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u.Scheme != "logger" {
|
||||
return fmt.Errorf("invalid scheme %s", u.Scheme)
|
||||
}
|
||||
jsonq := u.Query().Get("json")
|
||||
if jsonq == "true" {
|
||||
setJSONFormatter()
|
||||
}
|
||||
|
||||
switch u.Opaque {
|
||||
case "syslog":
|
||||
if setSyslogFormatter == nil {
|
||||
return fmt.Errorf("system does not support syslog")
|
||||
}
|
||||
appname := u.Query().Get("appname")
|
||||
facility := u.Query().Get("local")
|
||||
return setSyslogFormatter(appname, facility)
|
||||
case "eventlog":
|
||||
if setEventlogFormatter == nil {
|
||||
return fmt.Errorf("system does not support eventlog")
|
||||
}
|
||||
name := u.Query().Get("name")
|
||||
debugAsInfo := false
|
||||
debugAsInfoRaw := u.Query().Get("debugAsInfo")
|
||||
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
|
||||
debugAsInfo = parsedDebugAsInfo
|
||||
}
|
||||
return setEventlogFormatter(name, debugAsInfo)
|
||||
case "stdout":
|
||||
origLogger.Out = os.Stdout
|
||||
case "stderr":
|
||||
origLogger.Out = os.Stderr
|
||||
default:
|
||||
return fmt.Errorf("unsupported logger %q", u.Opaque)
|
||||
}
|
||||
return nil
|
||||
err = baseLogger.SetFormat(s.format)
|
||||
return err
|
||||
}
|
||||
|
||||
func init() {
|
||||
AddFlags(flag.CommandLine)
|
||||
}
|
||||
|
||||
// AddFlags adds the flags used by this package to the given FlagSet. That's
|
||||
// useful if working with a custom FlagSet. The init function of this package
|
||||
// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
|
||||
// flag.Parse() to make the logging flags take effect.
|
||||
func AddFlags(fs *flag.FlagSet) {
|
||||
fs.Var(
|
||||
levelFlag(origLogger.Level.String()),
|
||||
"log.level",
|
||||
"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
|
||||
)
|
||||
fs.Var(
|
||||
logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
|
||||
"log.format",
|
||||
`Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
|
||||
)
|
||||
// AddFlags adds the flags used by this package to the Kingpin application.
|
||||
// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
|
||||
func AddFlags(a *kingpin.Application) {
|
||||
s := loggerSettings{}
|
||||
a.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").
|
||||
Default(origLogger.Level.String()).
|
||||
StringVar(&s.level)
|
||||
defaultFormat := url.URL{Scheme: "logger", Opaque: "stderr"}
|
||||
a.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
|
||||
Default(defaultFormat.String()).
|
||||
StringVar(&s.format)
|
||||
a.Action(s.apply)
|
||||
}
|
||||
|
||||
// Logger is the interface for loggers used in the Prometheus components.
|
||||
@@ -150,6 +89,9 @@ type Logger interface {
|
||||
Fatalf(string, ...interface{})
|
||||
|
||||
With(key string, value interface{}) Logger
|
||||
|
||||
SetFormat(string) error
|
||||
SetLevel(string) error
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
@@ -235,6 +177,58 @@ func (l logger) Fatalf(format string, args ...interface{}) {
|
||||
l.sourced().Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (l logger) SetLevel(level string) error {
|
||||
lvl, err := logrus.ParseLevel(level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.entry.Logger.Level = lvl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l logger) SetFormat(format string) error {
|
||||
u, err := url.Parse(format)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u.Scheme != "logger" {
|
||||
return fmt.Errorf("invalid scheme %s", u.Scheme)
|
||||
}
|
||||
jsonq := u.Query().Get("json")
|
||||
if jsonq == "true" {
|
||||
setJSONFormatter()
|
||||
}
|
||||
|
||||
switch u.Opaque {
|
||||
case "syslog":
|
||||
if setSyslogFormatter == nil {
|
||||
return fmt.Errorf("system does not support syslog")
|
||||
}
|
||||
appname := u.Query().Get("appname")
|
||||
facility := u.Query().Get("local")
|
||||
return setSyslogFormatter(l, appname, facility)
|
||||
case "eventlog":
|
||||
if setEventlogFormatter == nil {
|
||||
return fmt.Errorf("system does not support eventlog")
|
||||
}
|
||||
name := u.Query().Get("name")
|
||||
debugAsInfo := false
|
||||
debugAsInfoRaw := u.Query().Get("debugAsInfo")
|
||||
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
|
||||
debugAsInfo = parsedDebugAsInfo
|
||||
}
|
||||
return setEventlogFormatter(l, name, debugAsInfo)
|
||||
case "stdout":
|
||||
l.entry.Logger.Out = os.Stdout
|
||||
case "stderr":
|
||||
l.entry.Logger.Out = os.Stderr
|
||||
default:
|
||||
return fmt.Errorf("unsupported logger %q", u.Opaque)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sourced adds a source field to the logger that contains
|
||||
// the file name and line where the logging happened.
|
||||
func (l logger) sourced() *logrus.Entry {
|
||||
@@ -351,6 +345,11 @@ func Fatalf(format string, args ...interface{}) {
|
||||
baseLogger.sourced().Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// AddHook adds hook to Prometheus' original logger.
|
||||
func AddHook(hook logrus.Hook) {
|
||||
origLogger.Hooks.Add(hook)
|
||||
}
|
||||
|
||||
type errorLogWriter struct{}
|
||||
|
||||
func (errorLogWriter) Write(b []byte) (int, error) {
|
||||
|
||||
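For reference, a minimal sketch of how the kingpin-based AddFlags shown above is typically wired into a program (hypothetical example, not part of the vendored diff; it uses only the AddFlags, Infoln and kingpin APIs that appear in or alongside this change):

package main

import (
	"os"

	"github.com/prometheus/common/log"
	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("example", "demo of --log.level and --log.format")
	log.AddFlags(app) // registers the flags; the settings are applied via an app.Action during Parse
	kingpin.MustParse(app.Parse(os.Args[1:]))
	log.Infoln("logger configured via kingpin flags")
}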
21 vendor/github.com/prometheus/common/log/syslog_formatter.go (generated, vendored)
@@ -20,11 +20,13 @@ import (
|
||||
"log/syslog"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var _ logrus.Formatter = (*syslogger)(nil)
|
||||
|
||||
func init() {
|
||||
setSyslogFormatter = func(appname, local string) error {
|
||||
setSyslogFormatter = func(l logger, appname, local string) error {
|
||||
if appname == "" {
|
||||
return fmt.Errorf("missing appname parameter")
|
||||
}
|
||||
@@ -32,18 +34,18 @@ func init() {
|
||||
return fmt.Errorf("missing local parameter")
|
||||
}
|
||||
|
||||
fmter, err := newSyslogger(appname, local, origLogger.Formatter)
|
||||
fmter, err := newSyslogger(appname, local, l.entry.Logger.Formatter)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
|
||||
origLogger.Errorf("can't connect logger to syslog: %v", err)
|
||||
l.entry.Errorf("can't connect logger to syslog: %v", err)
|
||||
return err
|
||||
}
|
||||
origLogger.Formatter = fmter
|
||||
l.entry.Logger.Formatter = fmter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var ceeTag = []byte("@cee:")
|
||||
var prefixTag []byte
|
||||
|
||||
type syslogger struct {
|
||||
wrap logrus.Formatter
|
||||
@@ -56,6 +58,11 @@ func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*sys
|
||||
return nil, err
|
||||
}
|
||||
out, err := syslog.New(priority, appname)
|
||||
_, isJSON := fmter.(*logrus.JSONFormatter)
|
||||
if isJSON {
|
||||
// add cee tag to json formatted syslogs
|
||||
prefixTag = []byte("@cee:")
|
||||
}
|
||||
return &syslogger{
|
||||
out: out,
|
||||
wrap: fmter,
|
||||
@@ -92,7 +99,7 @@ func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
|
||||
}
|
||||
// only append tag to data sent to syslog (line), not to what
|
||||
// is returned
|
||||
line := string(append(ceeTag, data...))
|
||||
line := string(append(prefixTag, data...))
|
||||
|
||||
switch e.Level {
|
||||
case logrus.PanicLevel:
|
||||
|
||||
12 vendor/github.com/prometheus/common/model/labels.go (generated, vendored)
@@ -80,14 +80,18 @@ const (
|
||||
QuantileLabel = "quantile"
|
||||
)
|
||||
|
||||
// LabelNameRE is a regular expression matching valid label names.
|
||||
// LabelNameRE is a regular expression matching valid label names. Note that the
|
||||
// IsValid method of LabelName performs the same check but faster than a match
|
||||
// with this regular expression.
|
||||
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||
|
||||
// A LabelName is a key for a LabelSet or Metric. It has a value associated
|
||||
// therewith.
|
||||
type LabelName string
|
||||
|
||||
// IsValid is true iff the label name matches the pattern of LabelNameRE.
|
||||
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
|
||||
// method, however, does not use LabelNameRE for the check but a much faster
|
||||
// hardcoded implementation.
|
||||
func (ln LabelName) IsValid() bool {
|
||||
if len(ln) == 0 {
|
||||
return false
|
||||
@@ -106,7 +110,7 @@ func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
if err := unmarshal(&s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
if !LabelName(s).IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
@@ -119,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
if !LabelName(s).IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
|
||||
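To illustrate the IsValid fast path referenced above, a small hypothetical snippet (not part of the vendored diff) comparing it with the LabelNameRE match:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []string{"http_requests_total", "0_starts_with_digit", "has-dash"} {
		ln := model.LabelName(name)
		// IsValid uses a hardcoded check; LabelNameRE gives the same answer, just more slowly.
		fmt.Println(name, ln.IsValid(), model.LabelNameRE.MatchString(name))
	}
}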
2 vendor/github.com/prometheus/common/model/labelset.go (generated, vendored)
@@ -160,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
|
||||
// LabelName as a string and does not call its UnmarshalJSON method.
|
||||
// Thus, we have to replicate the behavior here.
|
||||
for ln := range m {
|
||||
if !LabelNameRE.MatchString(string(ln)) {
|
||||
if !ln.IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", ln)
|
||||
}
|
||||
}
|
||||
|
||||
11 vendor/github.com/prometheus/common/model/metric.go (generated, vendored)
@@ -21,8 +21,11 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
separator = []byte{0}
|
||||
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
|
||||
separator = []byte{0}
|
||||
// MetricNameRE is a regular expression matching valid metric
|
||||
// names. Note that the IsValidMetricName function performs the same
|
||||
// check but faster than a match with this regular expression.
|
||||
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
|
||||
)
|
||||
|
||||
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
|
||||
@@ -41,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
|
||||
|
||||
// Clone returns a copy of the Metric.
|
||||
func (m Metric) Clone() Metric {
|
||||
clone := Metric{}
|
||||
clone := make(Metric, len(m))
|
||||
for k, v := range m {
|
||||
clone[k] = v
|
||||
}
|
||||
@@ -85,6 +88,8 @@ func (m Metric) FastFingerprint() Fingerprint {
|
||||
}
|
||||
|
||||
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
|
||||
// This function, however, does not use MetricNameRE for the check but a much
|
||||
// faster hardcoded implementation.
|
||||
func IsValidMetricName(n LabelValue) bool {
|
||||
if len(n) == 0 {
|
||||
return false
|
||||
|
||||
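A hypothetical example (not part of the vendored diff) of the updated metric-name validation, which now also accepts a colon in the first position:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.IsValidMetricName(model.LabelValue("node_cpu_seconds_total"))) // true
	fmt.Println(model.IsValidMetricName(model.LabelValue(":recording:rule")))        // true with the updated pattern
	fmt.Println(model.IsValidMetricName(model.LabelValue("1_bad_name")))             // false
}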
17 vendor/github.com/prometheus/common/model/time.go (generated, vendored)
@@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error {
|
||||
// This type should not propagate beyond the scope of input/output processing.
|
||||
type Duration time.Duration
|
||||
|
||||
// Set implements pflag/flag.Value
|
||||
func (d *Duration) Set(s string) error {
|
||||
var err error
|
||||
*d, err = ParseDuration(s)
|
||||
return err
|
||||
}
|
||||
|
||||
// Type implements pflag.Value
|
||||
func (d *Duration) Type() string {
|
||||
return "duration"
|
||||
}
|
||||
|
||||
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
|
||||
|
||||
// StringToDuration parses a string into a time.Duration, assuming that a year
|
||||
// ParseDuration parses a string into a time.Duration, assuming that a year
|
||||
// always has 365d, a week always has 7d, and a day always has 24h.
|
||||
func ParseDuration(durationStr string) (Duration, error) {
|
||||
matches := durationRE.FindStringSubmatch(durationStr)
|
||||
@@ -202,6 +214,9 @@ func (d Duration) String() string {
|
||||
ms = int64(time.Duration(d) / time.Millisecond)
|
||||
unit = "ms"
|
||||
)
|
||||
if ms == 0 {
|
||||
return "0s"
|
||||
}
|
||||
factors := map[string]int64{
|
||||
"y": 1000 * 60 * 60 * 24 * 365,
|
||||
"w": 1000 * 60 * 60 * 24 * 7,
|
||||
|
||||
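The new Set and Type methods above make Duration usable as a flag/pflag value; a hypothetical example (not part of the vendored diff):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	var d model.Duration
	if err := d.Set("2h"); err != nil { // same parser as model.ParseDuration
		panic(err)
	}
	fmt.Println(time.Duration(d), d.Type()) // "2h0m0s duration"
}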
21 vendor/github.com/prometheus/common/model/value.go (generated, vendored)
@@ -22,6 +22,22 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
|
||||
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
|
||||
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
|
||||
// of 0, which is possible to appear in a real SamplePair and thus not
|
||||
// suitable to signal a non-existing SamplePair.
|
||||
ZeroSamplePair = SamplePair{Timestamp: Earliest}
|
||||
|
||||
// ZeroSample is the pseudo zero-value of Sample used to signal a
|
||||
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
|
||||
// and metric nil. Note that the natural zero value of Sample has a timestamp
|
||||
// of 0, which is possible to appear in a real Sample and thus not suitable
|
||||
// to signal a non-existing Sample.
|
||||
ZeroSample = Sample{Timestamp: Earliest}
|
||||
)
|
||||
|
||||
// A SampleValue is a representation of a value for a given sample at a given
|
||||
// time.
|
||||
type SampleValue float64
|
||||
@@ -113,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
|
||||
if !s.Timestamp.Equal(o.Timestamp) {
|
||||
return false
|
||||
}
|
||||
if s.Value.Equal(o.Value) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return s.Value.Equal(o.Value)
|
||||
}
|
||||
|
||||
func (s Sample) String() string {
|
||||
|
||||
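The Equal change above removes an inverted value comparison; a hypothetical check (not part of the vendored diff):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Sample{Timestamp: 1000, Value: 0.5}
	b := &model.Sample{Timestamp: 1000, Value: 0.5}
	fmt.Println(a.Equal(b)) // true; the previous code returned false for equal values
}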
20 vendor/github.com/prometheus/procfs/AUTHORS.md (generated, vendored)
@@ -1,20 +0,0 @@
|
||||
The Prometheus project was started by Matt T. Proud (emeritus) and
|
||||
Julius Volz in 2012.
|
||||
|
||||
Maintainers of this repository:
|
||||
|
||||
* Tobias Schmidt <ts@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Armen Baghumian <abaghumian@noggin.com.au>
|
||||
* Bjoern Rabenstein <beorn@soundcloud.com>
|
||||
* David Cournapeau <cournape@gmail.com>
|
||||
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
|
||||
* Jonas Große Sundrup <cherti@letopolis.de>
|
||||
* Julius Volz <julius.volz@gmail.com>
|
||||
* Matthias Rampke <mr@soundcloud.com>
|
||||
* Nicky Gerritsen <nicky@streamone.nl>
|
||||
* Rémi Audebert <contact@halfr.net>
|
||||
* Tobias Schmidt <tobidt@gmail.com>
|
||||
18 vendor/github.com/prometheus/procfs/CONTRIBUTING.md (generated, vendored)
@@ -1,18 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
Prometheus uses GitHub to manage reviews of pull requests.
|
||||
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull
|
||||
request, addressing (with `@...`) one or more of the maintainers
|
||||
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
|
||||
|
||||
* If you plan to do something more involved, first discuss your ideas
|
||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
This will avoid unnecessary work and surely give you and us a good deal
|
||||
of inspiration.
|
||||
|
||||
* Relevant coding style guidelines are the [Go Code Review
|
||||
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
|
||||
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||
Practices for Production
|
||||
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
|
||||
6 vendor/github.com/prometheus/procfs/Makefile (generated, vendored)
@@ -1,6 +0,0 @@
|
||||
ci:
|
||||
! gofmt -l *.go | read nothing
|
||||
go vet
|
||||
go test -v ./...
|
||||
go get github.com/golang/lint/golint
|
||||
golint *.go
|
||||
10 vendor/github.com/prometheus/procfs/README.md (generated, vendored)
@@ -1,10 +0,0 @@
|
||||
# procfs
|
||||
|
||||
This procfs package provides functions to retrieve system, kernel and process
|
||||
metrics from the pseudo-filesystem proc.
|
||||
|
||||
*WARNING*: This package is a work in progress. Its API may still break in
|
||||
backwards-incompatible ways without warnings. Use it at your own risk.
|
||||
|
||||
[](https://godoc.org/github.com/prometheus/procfs)
|
||||
[](https://travis-ci.org/prometheus/procfs)
|
||||
95 vendor/github.com/prometheus/procfs/buddyinfo.go (generated, vendored, new file)
@@ -0,0 +1,95 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A BuddyInfo is the details parsed from /proc/buddyinfo.
|
||||
// The data is comprised of an array of free fragments of each size.
|
||||
// The sizes are 2^n*PAGE_SIZE, where n is the array index.
|
||||
type BuddyInfo struct {
|
||||
Node string
|
||||
Zone string
|
||||
Sizes []float64
|
||||
}
|
||||
|
||||
// NewBuddyInfo reads the buddyinfo statistics.
|
||||
func NewBuddyInfo() ([]BuddyInfo, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fs.NewBuddyInfo()
|
||||
}
|
||||
|
||||
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
|
||||
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
|
||||
file, err := os.Open(fs.Path("buddyinfo"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return parseBuddyInfo(file)
|
||||
}
|
||||
|
||||
func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
||||
var (
|
||||
buddyInfo = []BuddyInfo{}
|
||||
scanner = bufio.NewScanner(r)
|
||||
bucketCount = -1
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
var err error
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(line)
|
||||
|
||||
if len(parts) < 4 {
|
||||
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
|
||||
}
|
||||
|
||||
node := strings.TrimRight(parts[1], ",")
|
||||
zone := strings.TrimRight(parts[3], ",")
|
||||
arraySize := len(parts[4:])
|
||||
|
||||
if bucketCount == -1 {
|
||||
bucketCount = arraySize
|
||||
} else {
|
||||
if bucketCount != arraySize {
|
||||
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
|
||||
}
|
||||
}
|
||||
|
||||
sizes := make([]float64, arraySize)
|
||||
for i := 0; i < arraySize; i++ {
|
||||
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
|
||||
}
|
||||
|
||||
return buddyInfo, scanner.Err()
|
||||
}
|
||||
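A hypothetical usage sketch for the new buddyinfo parser (not part of the vendored diff; it only works on a Linux host with /proc mounted):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	infos, err := procfs.NewBuddyInfo() // reads /proc/buddyinfo via the default mount point
	if err != nil {
		panic(err)
	}
	for _, bi := range infos {
		// Sizes[n] counts free fragments of size 2^n * PAGE_SIZE in this node/zone.
		fmt.Println(bi.Node, bi.Zone, bi.Sizes)
	}
}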
49 vendor/github.com/prometheus/procfs/fs.go (generated, vendored)
@@ -1,9 +1,25 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/prometheus/procfs/nfs"
|
||||
"github.com/prometheus/procfs/xfs"
|
||||
)
|
||||
|
||||
// FS represents the pseudo-filesystem proc, which provides an interface to
|
||||
@@ -31,3 +47,36 @@ func NewFS(mountPoint string) (FS, error) {
|
||||
func (fs FS) Path(p ...string) string {
|
||||
return path.Join(append([]string{string(fs)}, p...)...)
|
||||
}
|
||||
|
||||
// XFSStats retrieves XFS filesystem runtime statistics.
|
||||
func (fs FS) XFSStats() (*xfs.Stats, error) {
|
||||
f, err := os.Open(fs.Path("fs/xfs/stat"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return xfs.ParseStats(f)
|
||||
}
|
||||
|
||||
// NFSClientRPCStats retrieves NFS client RPC statistics.
|
||||
func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
|
||||
f, err := os.Open(fs.Path("net/rpc/nfs"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return nfs.ParseClientRPCStats(f)
|
||||
}
|
||||
|
||||
// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
|
||||
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
|
||||
f, err := os.Open(fs.Path("net/rpc/nfsd"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return nfs.ParseServerRPCStats(f)
|
||||
}
|
||||
|
||||
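A hypothetical sketch using the new FS accessors above (not part of the vendored diff; Linux-only, and the returned structs are printed verbatim rather than relying on particular field names):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	if clientStats, err := fs.NFSClientRPCStats(); err == nil {
		fmt.Printf("nfs client rpc stats: %+v\n", clientStats)
	}
	if xfsStats, err := fs.XFSStats(); err == nil {
		fmt.Printf("xfs stats: %+v\n", xfsStats)
	}
}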
46 vendor/github.com/prometheus/procfs/internal/util/parse.go (generated, vendored, new file)
@@ -0,0 +1,46 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package util
|
||||
|
||||
import "strconv"
|
||||
|
||||
// ParseUint32s parses a slice of strings into a slice of uint32s.
|
||||
func ParseUint32s(ss []string) ([]uint32, error) {
|
||||
us := make([]uint32, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
u, err := strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
us = append(us, uint32(u))
|
||||
}
|
||||
|
||||
return us, nil
|
||||
}
|
||||
|
||||
// ParseUint64s parses a slice of strings into a slice of uint64s.
|
||||
func ParseUint64s(ss []string) ([]uint64, error) {
|
||||
us := make([]uint64, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
u, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
us = append(us, u)
|
||||
}
|
||||
|
||||
return us, nil
|
||||
}
|
||||
69 vendor/github.com/prometheus/procfs/ipvs.go (generated, vendored)
@@ -1,3 +1,16 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
@@ -31,14 +44,16 @@ type IPVSStats struct {
|
||||
type IPVSBackendStatus struct {
|
||||
// The local (virtual) IP address.
|
||||
LocalAddress net.IP
|
||||
// The local (virtual) port.
|
||||
LocalPort uint16
|
||||
// The transport protocol (TCP, UDP).
|
||||
Proto string
|
||||
// The remote (real) IP address.
|
||||
RemoteAddress net.IP
|
||||
// The local (virtual) port.
|
||||
LocalPort uint16
|
||||
// The remote (real) port.
|
||||
RemotePort uint16
|
||||
// The local firewall mark
|
||||
LocalMark string
|
||||
// The transport protocol (TCP, UDP).
|
||||
Proto string
|
||||
// The current number of active connections for this virtual/real address pair.
|
||||
ActiveConn uint64
|
||||
// The current number of inactive connections for this virtual/real address pair.
|
||||
@@ -142,13 +157,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||
status []IPVSBackendStatus
|
||||
scanner = bufio.NewScanner(file)
|
||||
proto string
|
||||
localMark string
|
||||
localAddress net.IP
|
||||
localPort uint16
|
||||
err error
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
fields := strings.Fields(string(scanner.Text()))
|
||||
fields := strings.Fields(scanner.Text())
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
@@ -160,10 +176,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||
continue
|
||||
}
|
||||
proto = fields[0]
|
||||
localMark = ""
|
||||
localAddress, localPort, err = parseIPPort(fields[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case fields[0] == "FWM":
|
||||
if len(fields) < 2 {
|
||||
continue
|
||||
}
|
||||
proto = fields[0]
|
||||
localMark = fields[1]
|
||||
localAddress = nil
|
||||
localPort = 0
|
||||
case fields[0] == "->":
|
||||
if len(fields) < 6 {
|
||||
continue
|
||||
@@ -187,6 +212,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||
status = append(status, IPVSBackendStatus{
|
||||
LocalAddress: localAddress,
|
||||
LocalPort: localPort,
|
||||
LocalMark: localMark,
|
||||
RemoteAddress: remoteAddress,
|
||||
RemotePort: remotePort,
|
||||
Proto: proto,
|
||||
@@ -200,22 +226,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||
}
|
||||
|
||||
func parseIPPort(s string) (net.IP, uint16, error) {
|
||||
tmp := strings.SplitN(s, ":", 2)
|
||||
var (
|
||||
ip net.IP
|
||||
err error
|
||||
)
|
||||
|
||||
if len(tmp) != 2 {
|
||||
return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
|
||||
switch len(s) {
|
||||
case 13:
|
||||
ip, err = hex.DecodeString(s[0:8])
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
case 46:
|
||||
ip = net.ParseIP(s[1:40])
|
||||
if ip == nil {
|
||||
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
|
||||
}
|
||||
default:
|
||||
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
|
||||
}
|
||||
|
||||
if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
|
||||
return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
|
||||
portString := s[len(s)-4:]
|
||||
if len(portString) != 4 {
|
||||
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
|
||||
}
|
||||
|
||||
ip, err := hex.DecodeString(tmp[0])
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
port, err := strconv.ParseUint(tmp[1], 16, 16)
|
||||
port, err := strconv.ParseUint(portString, 16, 16)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
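The rewritten parseIPPort above distinguishes two /proc/net/ip_vs encodings: a 13-character form (8 hex digits of IPv4, a colon, and a 4-hex-digit port) and a 46-character form (a bracketed IPv6 literal followed by the hex port). A hypothetical standalone snippet mirroring the IPv4 branch (not part of the vendored diff, and not calling the unexported function):

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	s := "C0A80016:0CEA" // 13-character IPv4 form
	ipBytes, err := hex.DecodeString(s[0:8]) // "C0A80016" -> 192.168.0.22
	if err != nil {
		panic(err)
	}
	port, err := strconv.ParseUint(s[len(s)-4:], 16, 16) // "0CEA" -> 3306
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s:%d\n", net.IP(ipBytes), port) // 192.168.0.22:3306
}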
13 vendor/github.com/prometheus/procfs/mdstat.go (generated, vendored)
@@ -1,3 +1,16 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
|
||||
569 vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored, new file)
@@ -0,0 +1,569 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
|
||||
// heavily as a reference:
|
||||
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
|
||||
//
|
||||
// Special thanks to Chris Siebenmann for all of his posts explaining the
|
||||
// various statistics available for NFS.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Constants shared between multiple functions.
|
||||
const (
|
||||
deviceEntryLen = 8
|
||||
|
||||
fieldBytesLen = 8
|
||||
fieldEventsLen = 27
|
||||
|
||||
statVersion10 = "1.0"
|
||||
statVersion11 = "1.1"
|
||||
|
||||
fieldTransport10Len = 10
|
||||
fieldTransport11Len = 13
|
||||
)
|
||||
|
||||
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
|
||||
type Mount struct {
|
||||
// Name of the device.
|
||||
Device string
|
||||
// The mount point of the device.
|
||||
Mount string
|
||||
// The filesystem type used by the device.
|
||||
Type string
|
||||
// If available additional statistics related to this Mount.
|
||||
// Use a type assertion to determine if additional statistics are available.
|
||||
Stats MountStats
|
||||
}
|
||||
|
||||
// A MountStats is a type which contains detailed statistics for a specific
|
||||
// type of Mount.
|
||||
type MountStats interface {
|
||||
mountStats()
|
||||
}
|
||||
|
||||
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
|
||||
type MountStatsNFS struct {
|
||||
// The version of statistics provided.
|
||||
StatVersion string
|
||||
// The age of the NFS mount.
|
||||
Age time.Duration
|
||||
// Statistics related to byte counters for various operations.
|
||||
Bytes NFSBytesStats
|
||||
// Statistics related to various NFS event occurrences.
|
||||
Events NFSEventsStats
|
||||
// Statistics broken down by filesystem operation.
|
||||
Operations []NFSOperationStats
|
||||
// Statistics about the NFS RPC transport.
|
||||
Transport NFSTransportStats
|
||||
}
|
||||
|
||||
// mountStats implements MountStats.
|
||||
func (m MountStatsNFS) mountStats() {}
|
||||
|
||||
// A NFSBytesStats contains statistics about the number of bytes read and written
|
||||
// by an NFS client to and from an NFS server.
|
||||
type NFSBytesStats struct {
|
||||
// Number of bytes read using the read() syscall.
|
||||
Read uint64
|
||||
// Number of bytes written using the write() syscall.
|
||||
Write uint64
|
||||
// Number of bytes read using the read() syscall in O_DIRECT mode.
|
||||
DirectRead uint64
|
||||
// Number of bytes written using the write() syscall in O_DIRECT mode.
|
||||
DirectWrite uint64
|
||||
// Number of bytes read from the NFS server, in total.
|
||||
ReadTotal uint64
|
||||
// Number of bytes written to the NFS server, in total.
|
||||
WriteTotal uint64
|
||||
// Number of pages read directly via mmap()'d files.
|
||||
ReadPages uint64
|
||||
// Number of pages written directly via mmap()'d files.
|
||||
WritePages uint64
|
||||
}
|
||||
|
||||
// A NFSEventsStats contains statistics about NFS event occurrences.
|
||||
type NFSEventsStats struct {
|
||||
// Number of times cached inode attributes are re-validated from the server.
|
||||
InodeRevalidate uint64
|
||||
// Number of times cached dentry nodes are re-validated from the server.
|
||||
DnodeRevalidate uint64
|
||||
// Number of times an inode cache is cleared.
|
||||
DataInvalidate uint64
|
||||
// Number of times cached inode attributes are invalidated.
|
||||
AttributeInvalidate uint64
|
||||
// Number of times files or directories have been open()'d.
|
||||
VFSOpen uint64
|
||||
// Number of times a directory lookup has occurred.
|
||||
VFSLookup uint64
|
||||
// Number of times permissions have been checked.
|
||||
VFSAccess uint64
|
||||
// Number of updates (and potential writes) to pages.
|
||||
VFSUpdatePage uint64
|
||||
// Number of pages read directly via mmap()'d files.
|
||||
VFSReadPage uint64
|
||||
// Number of times a group of pages have been read.
|
||||
VFSReadPages uint64
|
||||
// Number of pages written directly via mmap()'d files.
|
||||
VFSWritePage uint64
|
||||
// Number of times a group of pages have been written.
|
||||
VFSWritePages uint64
|
||||
// Number of times directory entries have been read with getdents().
|
||||
VFSGetdents uint64
|
||||
// Number of times attributes have been set on inodes.
|
||||
VFSSetattr uint64
|
||||
// Number of pending writes that have been forcefully flushed to the server.
|
||||
VFSFlush uint64
|
||||
// Number of times fsync() has been called on directories and files.
|
||||
VFSFsync uint64
|
||||
// Number of times locking has been attempted on a file.
|
||||
VFSLock uint64
|
||||
// Number of times files have been closed and released.
|
||||
VFSFileRelease uint64
|
||||
// Unknown. Possibly unused.
|
||||
CongestionWait uint64
|
||||
// Number of times files have been truncated.
|
||||
Truncation uint64
|
||||
// Number of times a file has been grown due to writes beyond its existing end.
|
||||
WriteExtension uint64
|
||||
// Number of times a file was removed while still open by another process.
|
||||
SillyRename uint64
|
||||
// Number of times the NFS server gave less data than expected while reading.
|
||||
ShortRead uint64
|
||||
// Number of times the NFS server wrote less data than expected while writing.
|
||||
ShortWrite uint64
|
||||
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
|
||||
// offline storage.
|
||||
JukeboxDelay uint64
|
||||
// Number of NFS v4.1+ pNFS reads.
|
||||
PNFSRead uint64
|
||||
// Number of NFS v4.1+ pNFS writes.
|
||||
PNFSWrite uint64
|
||||
}
|
||||
|
||||
// A NFSOperationStats contains statistics for a single operation.
|
||||
type NFSOperationStats struct {
|
||||
// The name of the operation.
|
||||
Operation string
|
||||
// Number of requests performed for this operation.
|
||||
Requests uint64
|
||||
// Number of times an actual RPC request has been transmitted for this operation.
|
||||
Transmissions uint64
|
||||
// Number of times a request has had a major timeout.
|
||||
MajorTimeouts uint64
|
||||
// Number of bytes sent for this operation, including RPC headers and payload.
|
||||
BytesSent uint64
|
||||
// Number of bytes received for this operation, including RPC headers and payload.
|
||||
BytesReceived uint64
|
||||
// Duration all requests spent queued for transmission before they were sent.
|
||||
CumulativeQueueTime time.Duration
|
||||
// Duration it took to get a reply back after the request was transmitted.
|
||||
CumulativeTotalResponseTime time.Duration
|
||||
// Duration from when a request was enqueued to when it was completely handled.
|
||||
CumulativeTotalRequestTime time.Duration
|
||||
}
|
||||
|
||||
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
|
||||
// responses.
|
||||
type NFSTransportStats struct {
|
||||
// The local port used for the NFS mount.
|
||||
Port uint64
|
||||
// Number of times the client has had to establish a connection from scratch
|
||||
// to the NFS server.
|
||||
Bind uint64
|
||||
// Number of times the client has made a TCP connection to the NFS server.
|
||||
Connect uint64
|
||||
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
|
||||
// spent waiting for connections to the server to be established.
|
||||
ConnectIdleTime uint64
|
||||
// Duration since the NFS mount last saw any RPC traffic.
|
||||
IdleTime time.Duration
|
||||
// Number of RPC requests for this mount sent to the NFS server.
|
||||
Sends uint64
|
||||
// Number of RPC responses for this mount received from the NFS server.
|
||||
Receives uint64
|
||||
// Number of times the NFS server sent a response with a transaction ID
|
||||
// unknown to this client.
|
||||
BadTransactionIDs uint64
|
||||
// A running counter, incremented on each request as the current difference
|
||||
// between sends and receives.
|
||||
CumulativeActiveRequests uint64
|
||||
// A running counter, incremented on each request by the current backlog
|
||||
// queue size.
|
||||
CumulativeBacklog uint64
|
||||
|
||||
// Stats below only available with stat version 1.1.
|
||||
|
||||
// Maximum number of simultaneously active RPC requests ever used.
|
||||
MaximumRPCSlotsUsed uint64
|
||||
// A running counter, incremented on each request as the current size of the
|
||||
// sending queue.
|
||||
CumulativeSendingQueue uint64
|
||||
// A running counter, incremented on each request as the current size of the
|
||||
// pending queue.
|
||||
CumulativePendingQueue uint64
|
||||
}
|
||||
|
||||
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
|
||||
// of Mount structures containing detailed information about each mount.
|
||||
// If available, statistics for each mount are parsed as well.
|
||||
func parseMountStats(r io.Reader) ([]*Mount, error) {
|
||||
const (
|
||||
device = "device"
|
||||
statVersionPrefix = "statvers="
|
||||
|
||||
nfs3Type = "nfs"
|
||||
nfs4Type = "nfs4"
|
||||
)
|
||||
|
||||
var mounts []*Mount
|
||||
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Only look for device entries in this function
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) == 0 || ss[0] != device {
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := parseMount(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Does this mount also possess statistics information?
|
||||
if len(ss) > deviceEntryLen {
|
||||
// Only NFSv3 and v4 are supported for parsing statistics
|
||||
if m.Type != nfs3Type && m.Type != nfs4Type {
|
||||
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
|
||||
}
|
||||
|
||||
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
|
||||
|
||||
stats, err := parseMountStatsNFS(s, statVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.Stats = stats
|
||||
}
|
||||
|
||||
mounts = append(mounts, m)
|
||||
}
|
||||
|
||||
return mounts, s.Err()
|
||||
}
|
||||
|
||||
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
|
||||
// device [device] mounted on [mount] with fstype [type]
|
||||
func parseMount(ss []string) (*Mount, error) {
|
||||
if len(ss) < deviceEntryLen {
|
||||
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||
}
|
||||
|
||||
// Check for specific words appearing at specific indices to ensure
|
||||
// the format is consistent with what we expect
|
||||
format := []struct {
|
||||
i int
|
||||
s string
|
||||
}{
|
||||
{i: 0, s: "device"},
|
||||
{i: 2, s: "mounted"},
|
||||
{i: 3, s: "on"},
|
||||
{i: 5, s: "with"},
|
||||
{i: 6, s: "fstype"},
|
||||
}
|
||||
|
||||
for _, f := range format {
|
||||
if ss[f.i] != f.s {
|
||||
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||
}
|
||||
}
|
||||
|
||||
return &Mount{
|
||||
Device: ss[1],
|
||||
Mount: ss[4],
|
||||
Type: ss[7],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
|
||||
// related to NFS statistics.
|
||||
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
|
||||
// Field indicators for parsing specific types of data
|
||||
const (
|
||||
fieldAge = "age:"
|
||||
fieldBytes = "bytes:"
|
||||
fieldEvents = "events:"
|
||||
fieldPerOpStats = "per-op"
|
||||
fieldTransport = "xprt:"
|
||||
)
|
||||
|
||||
stats := &MountStatsNFS{
|
||||
StatVersion: statVersion,
|
||||
}
|
||||
|
||||
for s.Scan() {
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) == 0 {
|
||||
break
|
||||
}
|
||||
if len(ss) < 2 {
|
||||
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
|
||||
}
|
||||
|
||||
switch ss[0] {
|
||||
case fieldAge:
|
||||
// Age integer is in seconds
|
||||
d, err := time.ParseDuration(ss[1] + "s")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Age = d
|
||||
case fieldBytes:
|
||||
bstats, err := parseNFSBytesStats(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Bytes = *bstats
|
||||
case fieldEvents:
|
||||
estats, err := parseNFSEventsStats(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Events = *estats
|
||||
case fieldTransport:
|
||||
if len(ss) < 3 {
|
||||
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
|
||||
}
|
||||
|
||||
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Transport = *tstats
|
||||
}
|
||||
|
||||
// When encountering "per-operation statistics", we must break this
|
||||
// loop and parse them separately to ensure we can terminate parsing
|
||||
// before reaching another device entry; hence why this 'if' statement
|
||||
// is not just another switch case
|
||||
if ss[0] == fieldPerOpStats {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// NFS per-operation stats appear last before the next device entry
|
||||
perOpStats, err := parseNFSOperationStats(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Operations = perOpStats
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
|
||||
// integer fields.
|
||||
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
|
||||
if len(ss) != fieldBytesLen {
|
||||
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
|
||||
}
|
||||
|
||||
ns := make([]uint64, 0, fieldBytesLen)
|
||||
for _, s := range ss {
|
||||
n, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
return &NFSBytesStats{
|
||||
Read: ns[0],
|
||||
Write: ns[1],
|
||||
DirectRead: ns[2],
|
||||
DirectWrite: ns[3],
|
||||
ReadTotal: ns[4],
|
||||
WriteTotal: ns[5],
|
||||
ReadPages: ns[6],
|
||||
WritePages: ns[7],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
|
||||
// integer fields.
|
||||
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
|
||||
if len(ss) != fieldEventsLen {
|
||||
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
|
||||
}
|
||||
|
||||
ns := make([]uint64, 0, fieldEventsLen)
|
||||
for _, s := range ss {
|
||||
n, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
return &NFSEventsStats{
|
||||
InodeRevalidate: ns[0],
|
||||
DnodeRevalidate: ns[1],
|
||||
DataInvalidate: ns[2],
|
||||
AttributeInvalidate: ns[3],
|
||||
VFSOpen: ns[4],
|
||||
VFSLookup: ns[5],
|
||||
VFSAccess: ns[6],
|
||||
VFSUpdatePage: ns[7],
|
||||
VFSReadPage: ns[8],
|
||||
VFSReadPages: ns[9],
|
||||
VFSWritePage: ns[10],
|
||||
VFSWritePages: ns[11],
|
||||
VFSGetdents: ns[12],
|
||||
VFSSetattr: ns[13],
|
||||
VFSFlush: ns[14],
|
||||
VFSFsync: ns[15],
|
||||
VFSLock: ns[16],
|
||||
VFSFileRelease: ns[17],
|
||||
CongestionWait: ns[18],
|
||||
Truncation: ns[19],
|
||||
WriteExtension: ns[20],
|
||||
SillyRename: ns[21],
|
||||
ShortRead: ns[22],
|
||||
ShortWrite: ns[23],
|
||||
JukeboxDelay: ns[24],
|
||||
PNFSRead: ns[25],
|
||||
PNFSWrite: ns[26],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
|
||||
// additional information about per-operation statistics until an empty
|
||||
// line is reached.
|
||||
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||
const (
|
||||
// Number of expected fields in each per-operation statistics set
|
||||
numFields = 9
|
||||
)
|
||||
|
||||
var ops []NFSOperationStats
|
||||
|
||||
for s.Scan() {
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) == 0 {
|
||||
// Must break when reading a blank line after per-operation stats to
|
||||
// enable top-level function to parse the next device entry
|
||||
break
|
||||
}
|
||||
|
||||
if len(ss) != numFields {
|
||||
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
|
||||
}
|
||||
|
||||
// Skip string operation name for integers
|
||||
ns := make([]uint64, 0, numFields-1)
|
||||
for _, st := range ss[1:] {
|
||||
n, err := strconv.ParseUint(st, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
ops = append(ops, NFSOperationStats{
|
||||
Operation: strings.TrimSuffix(ss[0], ":"),
|
||||
Requests: ns[0],
|
||||
Transmissions: ns[1],
|
||||
MajorTimeouts: ns[2],
|
||||
BytesSent: ns[3],
|
||||
BytesReceived: ns[4],
|
||||
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
|
||||
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
|
||||
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
|
||||
})
|
||||
}
|
||||
|
||||
return ops, s.Err()
|
||||
}
|
||||
|
||||
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
|
||||
// integer fields matched to a specific stats version.
|
||||
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
|
||||
switch statVersion {
|
||||
case statVersion10:
|
||||
if len(ss) != fieldTransport10Len {
|
||||
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
|
||||
}
|
||||
case statVersion11:
|
||||
if len(ss) != fieldTransport11Len {
|
||||
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
|
||||
}
|
||||
|
||||
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
|
||||
// in a v1.0 response.
|
||||
//
|
||||
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
|
||||
// only v1.0 stats are present.
|
||||
// See: https://github.com/prometheus/node_exporter/issues/571.
|
||||
ns := make([]uint64, fieldTransport11Len)
|
||||
for i, s := range ss {
|
||||
n, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns[i] = n
|
||||
}
|
||||
|
||||
return &NFSTransportStats{
|
||||
Port: ns[0],
|
||||
Bind: ns[1],
|
||||
Connect: ns[2],
|
||||
ConnectIdleTime: ns[3],
|
||||
IdleTime: time.Duration(ns[4]) * time.Second,
|
||||
Sends: ns[5],
|
||||
Receives: ns[6],
|
||||
BadTransactionIDs: ns[7],
|
||||
CumulativeActiveRequests: ns[8],
|
||||
CumulativeBacklog: ns[9],
|
||||
MaximumRPCSlotsUsed: ns[10],
|
||||
CumulativeSendingQueue: ns[11],
|
||||
CumulativePendingQueue: ns[12],
|
||||
}, nil
|
||||
}
|
||||
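A hypothetical sketch of consuming the new mountstats parser (not part of the vendored diff; it assumes the package's Proc.MountStats accessor, which is not shown in this change):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // current process, i.e. /proc/self/mountstats
	if err != nil {
		panic(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		if nfsStats, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Println(m.Mount, nfsStats.Age, nfsStats.Bytes.ReadTotal, nfsStats.Bytes.WriteTotal)
		}
	}
}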
216 vendor/github.com/prometheus/procfs/net_dev.go (generated, vendored, new file)
@@ -0,0 +1,216 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
|
||||
type NetDevLine struct {
|
||||
Name string `json:"name"` // The name of the interface.
|
||||
RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
|
||||
RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
|
||||
RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
|
||||
RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
|
||||
RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
|
||||
RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
|
||||
RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
|
||||
RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
|
||||
TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
|
||||
TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
|
||||
TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
|
||||
TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
|
||||
TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
|
||||
TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
|
||||
TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
|
||||
TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
|
||||
}
|
||||
|
||||
// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
|
||||
// are interface names.
|
||||
type NetDev map[string]NetDevLine
|
||||
|
||||
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
|
||||
func NewNetDev() (NetDev, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fs.NewNetDev()
|
||||
}
|
||||
|
||||
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
|
||||
func (fs FS) NewNetDev() (NetDev, error) {
|
||||
return newNetDev(fs.Path("net/dev"))
|
||||
}
|
||||
|
||||
// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
|
||||
func (p Proc) NewNetDev() (NetDev, error) {
|
||||
return newNetDev(p.path("net/dev"))
|
||||
}
|
||||
|
||||
// newNetDev creates a new NetDev from the contents of the given file.
|
||||
func newNetDev(file string) (NetDev, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return NetDev{}, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
nd := NetDev{}
|
||||
s := bufio.NewScanner(f)
|
||||
for n := 0; s.Scan(); n++ {
|
||||
// Skip the 2 header lines.
|
||||
if n < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
line, err := nd.parseLine(s.Text())
|
||||
if err != nil {
|
||||
return nd, err
|
||||
}
|
||||
|
||||
nd[line.Name] = *line
|
||||
}
|
||||
|
||||
return nd, s.Err()
|
||||
}
|
||||
|
||||
// parseLine parses a single line from the /proc/net/dev file. Header lines
|
||||
// must be filtered prior to calling this method.
|
||||
func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
|
||||
parts := strings.SplitN(rawLine, ":", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.New("invalid net/dev line, missing colon")
|
||||
}
|
||||
fields := strings.Fields(strings.TrimSpace(parts[1]))
|
||||
|
||||
var err error
|
||||
line := &NetDevLine{}
|
||||
|
||||
// Interface Name
|
||||
line.Name = strings.TrimSpace(parts[0])
|
||||
if line.Name == "" {
|
||||
return nil, errors.New("invalid net/dev line, empty interface name")
|
||||
}
|
||||
|
||||
// RX
|
||||
line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TX
|
||||
line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
||||
|
||||
// Total aggregates the values across interfaces and returns a new NetDevLine.
|
||||
// The Name field will be a sorted comma-separated list of interface names.
|
||||
func (nd NetDev) Total() NetDevLine {
|
||||
total := NetDevLine{}
|
||||
|
||||
names := make([]string, 0, len(nd))
|
||||
for _, ifc := range nd {
|
||||
names = append(names, ifc.Name)
|
||||
total.RxBytes += ifc.RxBytes
|
||||
total.RxPackets += ifc.RxPackets
|
||||
total.RxErrors += ifc.RxErrors
|
||||
total.RxDropped += ifc.RxDropped
|
||||
total.RxFIFO += ifc.RxFIFO
|
||||
total.RxFrame += ifc.RxFrame
|
||||
total.RxCompressed += ifc.RxCompressed
|
||||
total.RxMulticast += ifc.RxMulticast
|
||||
total.TxBytes += ifc.TxBytes
|
||||
total.TxPackets += ifc.TxPackets
|
||||
total.TxErrors += ifc.TxErrors
|
||||
total.TxDropped += ifc.TxDropped
|
||||
total.TxFIFO += ifc.TxFIFO
|
||||
total.TxCollisions += ifc.TxCollisions
|
||||
total.TxCarrier += ifc.TxCarrier
|
||||
total.TxCompressed += ifc.TxCompressed
|
||||
}
|
||||
sort.Strings(names)
|
||||
total.Name = strings.Join(names, ", ")
|
||||
|
||||
return total
|
||||
}
|
||||
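A hypothetical usage sketch for the new /proc/net/dev parser (not part of the vendored diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	nd, err := procfs.NewNetDev() // parses /proc/net/dev
	if err != nil {
		panic(err)
	}
	for name, line := range nd {
		fmt.Println(name, line.RxBytes, line.TxBytes)
	}
	total := nd.Total() // aggregates all interfaces; Name is the sorted interface list
	fmt.Println("total:", total.RxBytes, total.TxBytes)
}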
263 vendor/github.com/prometheus/procfs/nfs/nfs.go (generated, vendored, new file)
@@ -0,0 +1,263 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package nfs implements parsing of /proc/net/rpc/nfs and /proc/net/rpc/nfsd.
|
||||
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
|
||||
package nfs
|
||||
|
||||
// ReplyCache models the "rc" line.
|
||||
type ReplyCache struct {
|
||||
Hits uint64
|
||||
Misses uint64
|
||||
NoCache uint64
|
||||
}
|
||||
|
||||
// FileHandles models the "fh" line.
|
||||
type FileHandles struct {
|
||||
Stale uint64
|
||||
TotalLookups uint64
|
||||
AnonLookups uint64
|
||||
DirNoCache uint64
|
||||
NoDirNoCache uint64
|
||||
}
|
||||
|
||||
// InputOutput models the "io" line.
|
||||
type InputOutput struct {
|
||||
Read uint64
|
||||
Write uint64
|
||||
}
|
||||
|
||||
// Threads models the "th" line.
|
||||
type Threads struct {
|
||||
Threads uint64
|
||||
FullCnt uint64
|
||||
}
|
||||
|
||||
// ReadAheadCache models the "ra" line.
|
||||
type ReadAheadCache struct {
|
||||
CacheSize uint64
|
||||
CacheHistogram []uint64
|
||||
NotFound uint64
|
||||
}
|
||||
|
||||
// Network models the "net" line.
|
||||
type Network struct {
|
||||
NetCount uint64
|
||||
UDPCount uint64
|
||||
TCPCount uint64
|
||||
TCPConnect uint64
|
||||
}
|
||||
|
||||
// ClientRPC models the nfs "rpc" line.
|
||||
type ClientRPC struct {
|
||||
RPCCount uint64
|
||||
Retransmissions uint64
|
||||
AuthRefreshes uint64
|
||||
}
|
||||
|
||||
// ServerRPC models the nfsd "rpc" line.
|
||||
type ServerRPC struct {
|
||||
RPCCount uint64
|
||||
BadCnt uint64
|
||||
BadFmt uint64
|
||||
BadAuth uint64
|
||||
BadcInt uint64
|
||||
}
|
||||
|
||||
// V2Stats models the "proc2" line.
|
||||
type V2Stats struct {
|
||||
Null uint64
|
||||
GetAttr uint64
|
||||
SetAttr uint64
|
||||
Root uint64
|
||||
Lookup uint64
|
||||
ReadLink uint64
|
||||
Read uint64
|
||||
WrCache uint64
|
||||
Write uint64
|
||||
Create uint64
|
||||
Remove uint64
|
||||
Rename uint64
|
||||
Link uint64
|
||||
SymLink uint64
|
||||
MkDir uint64
|
||||
RmDir uint64
|
||||
ReadDir uint64
|
||||
FsStat uint64
|
||||
}
|
||||
|
||||
// V3Stats models the "proc3" line.
|
||||
type V3Stats struct {
|
||||
Null uint64
|
||||
GetAttr uint64
|
||||
SetAttr uint64
|
||||
Lookup uint64
|
||||
Access uint64
|
||||
ReadLink uint64
|
||||
Read uint64
|
||||
Write uint64
|
||||
Create uint64
|
||||
MkDir uint64
|
||||
SymLink uint64
|
||||
MkNod uint64
|
||||
Remove uint64
|
||||
RmDir uint64
|
||||
Rename uint64
|
||||
Link uint64
|
||||
ReadDir uint64
|
||||
ReadDirPlus uint64
|
||||
FsStat uint64
|
||||
FsInfo uint64
|
||||
PathConf uint64
|
||||
Commit uint64
|
||||
}
|
||||
|
||||
// ClientV4Stats models the nfs "proc4" line.
|
||||
type ClientV4Stats struct {
|
||||
Null uint64
|
||||
Read uint64
|
||||
Write uint64
|
||||
Commit uint64
|
||||
Open uint64
|
||||
OpenConfirm uint64
|
||||
OpenNoattr uint64
|
||||
OpenDowngrade uint64
|
||||
Close uint64
|
||||
Setattr uint64
|
||||
FsInfo uint64
|
||||
Renew uint64
|
||||
SetClientId uint64
|
||||
SetClientIdConfirm uint64
|
||||
Lock uint64
|
||||
Lockt uint64
|
||||
Locku uint64
|
||||
Access uint64
|
||||
Getattr uint64
|
||||
Lookup uint64
|
||||
LookupRoot uint64
|
||||
Remove uint64
|
||||
Rename uint64
|
||||
Link uint64
|
||||
Symlink uint64
|
||||
Create uint64
|
||||
Pathconf uint64
|
||||
StatFs uint64
|
||||
ReadLink uint64
|
||||
ReadDir uint64
|
||||
ServerCaps uint64
|
||||
DelegReturn uint64
|
||||
GetAcl uint64
|
||||
SetAcl uint64
|
||||
FsLocations uint64
|
||||
ReleaseLockowner uint64
|
||||
Secinfo uint64
|
||||
FsidPresent uint64
|
||||
ExchangeId uint64
|
||||
CreateSession uint64
|
||||
DestroySession uint64
|
||||
Sequence uint64
|
||||
GetLeaseTime uint64
|
||||
ReclaimComplete uint64
|
||||
LayoutGet uint64
|
||||
GetDeviceInfo uint64
|
||||
LayoutCommit uint64
|
||||
LayoutReturn uint64
|
||||
SecinfoNoName uint64
|
||||
TestStateId uint64
|
||||
FreeStateId uint64
|
||||
GetDeviceList uint64
|
||||
BindConnToSession uint64
|
||||
DestroyClientId uint64
|
||||
Seek uint64
|
||||
Allocate uint64
|
||||
DeAllocate uint64
|
||||
LayoutStats uint64
|
||||
Clone uint64
|
||||
}
|
||||
|
||||
// ServerV4Stats models the nfsd "proc4" line.
|
||||
type ServerV4Stats struct {
|
||||
Null uint64
|
||||
Compound uint64
|
||||
}
|
||||
|
||||
// V4Ops models the "proc4ops" line: NFSv4 operations
|
||||
// Variable list, see:
|
||||
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
|
||||
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
|
||||
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
|
||||
type V4Ops struct {
|
||||
//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
|
||||
Op0Unused uint64
|
||||
Op1Unused uint64
|
||||
Op2Future uint64
|
||||
Access uint64
|
||||
Close uint64
|
||||
Commit uint64
|
||||
Create uint64
|
||||
DelegPurge uint64
|
||||
DelegReturn uint64
|
||||
GetAttr uint64
|
||||
GetFH uint64
|
||||
Link uint64
|
||||
Lock uint64
|
||||
Lockt uint64
|
||||
Locku uint64
|
||||
Lookup uint64
|
||||
LookupRoot uint64
|
||||
Nverify uint64
|
||||
Open uint64
|
||||
OpenAttr uint64
|
||||
OpenConfirm uint64
|
||||
OpenDgrd uint64
|
||||
PutFH uint64
|
||||
PutPubFH uint64
|
||||
PutRootFH uint64
|
||||
Read uint64
|
||||
ReadDir uint64
|
||||
ReadLink uint64
|
||||
Remove uint64
|
||||
Rename uint64
|
||||
Renew uint64
|
||||
RestoreFH uint64
|
||||
SaveFH uint64
|
||||
SecInfo uint64
|
||||
SetAttr uint64
|
||||
Verify uint64
|
||||
Write uint64
|
||||
RelLockOwner uint64
|
||||
}
|
||||
|
||||
// ClientRPCStats models all stats from /proc/net/rpc/nfs.
|
||||
type ClientRPCStats struct {
|
||||
Network Network
|
||||
ClientRPC ClientRPC
|
||||
V2Stats V2Stats
|
||||
V3Stats V3Stats
|
||||
ClientV4Stats ClientV4Stats
|
||||
}
|
||||
|
||||
// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
|
||||
type ServerRPCStats struct {
|
||||
ReplyCache ReplyCache
|
||||
FileHandles FileHandles
|
||||
InputOutput InputOutput
|
||||
Threads Threads
|
||||
ReadAheadCache ReadAheadCache
|
||||
Network Network
|
||||
ServerRPC ServerRPC
|
||||
V2Stats V2Stats
|
||||
V3Stats V3Stats
|
||||
ServerV4Stats ServerV4Stats
|
||||
V4Ops V4Ops
|
||||
}
|
||||
317 vendor/github.com/prometheus/procfs/nfs/parse.go generated vendored Normal file
@@ -0,0 +1,317 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func parseReplyCache(v []uint64) (ReplyCache, error) {
|
||||
if len(v) != 3 {
|
||||
return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
|
||||
}
|
||||
|
||||
return ReplyCache{
|
||||
Hits: v[0],
|
||||
Misses: v[1],
|
||||
NoCache: v[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseFileHandles(v []uint64) (FileHandles, error) {
|
||||
if len(v) != 5 {
|
||||
return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
|
||||
}
|
||||
|
||||
return FileHandles{
|
||||
Stale: v[0],
|
||||
TotalLookups: v[1],
|
||||
AnonLookups: v[2],
|
||||
DirNoCache: v[3],
|
||||
NoDirNoCache: v[4],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseInputOutput(v []uint64) (InputOutput, error) {
|
||||
if len(v) != 2 {
|
||||
return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
|
||||
}
|
||||
|
||||
return InputOutput{
|
||||
Read: v[0],
|
||||
Write: v[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseThreads(v []uint64) (Threads, error) {
|
||||
if len(v) != 2 {
|
||||
return Threads{}, fmt.Errorf("invalid Threads line %q", v)
|
||||
}
|
||||
|
||||
return Threads{
|
||||
Threads: v[0],
|
||||
FullCnt: v[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
|
||||
if len(v) != 12 {
|
||||
return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
|
||||
}
|
||||
|
||||
return ReadAheadCache{
|
||||
CacheSize: v[0],
|
||||
CacheHistogram: v[1:11],
|
||||
NotFound: v[11],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseNetwork(v []uint64) (Network, error) {
|
||||
if len(v) != 4 {
|
||||
return Network{}, fmt.Errorf("invalid Network line %q", v)
|
||||
}
|
||||
|
||||
return Network{
|
||||
NetCount: v[0],
|
||||
UDPCount: v[1],
|
||||
TCPCount: v[2],
|
||||
TCPConnect: v[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseServerRPC(v []uint64) (ServerRPC, error) {
|
||||
if len(v) != 5 {
|
||||
return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
|
||||
}
|
||||
|
||||
return ServerRPC{
|
||||
RPCCount: v[0],
|
||||
BadCnt: v[1],
|
||||
BadFmt: v[2],
|
||||
BadAuth: v[3],
|
||||
BadcInt: v[4],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseClientRPC(v []uint64) (ClientRPC, error) {
|
||||
if len(v) != 3 {
|
||||
return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
|
||||
}
|
||||
|
||||
return ClientRPC{
|
||||
RPCCount: v[0],
|
||||
Retransmissions: v[1],
|
||||
AuthRefreshes: v[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseV2Stats(v []uint64) (V2Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values != 18 {
|
||||
return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
|
||||
}
|
||||
|
||||
return V2Stats{
|
||||
Null: v[1],
|
||||
GetAttr: v[2],
|
||||
SetAttr: v[3],
|
||||
Root: v[4],
|
||||
Lookup: v[5],
|
||||
ReadLink: v[6],
|
||||
Read: v[7],
|
||||
WrCache: v[8],
|
||||
Write: v[9],
|
||||
Create: v[10],
|
||||
Remove: v[11],
|
||||
Rename: v[12],
|
||||
Link: v[13],
|
||||
SymLink: v[14],
|
||||
MkDir: v[15],
|
||||
RmDir: v[16],
|
||||
ReadDir: v[17],
|
||||
FsStat: v[18],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseV3Stats(v []uint64) (V3Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values != 22 {
|
||||
return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
|
||||
}
|
||||
|
||||
return V3Stats{
|
||||
Null: v[1],
|
||||
GetAttr: v[2],
|
||||
SetAttr: v[3],
|
||||
Lookup: v[4],
|
||||
Access: v[5],
|
||||
ReadLink: v[6],
|
||||
Read: v[7],
|
||||
Write: v[8],
|
||||
Create: v[9],
|
||||
MkDir: v[10],
|
||||
SymLink: v[11],
|
||||
MkNod: v[12],
|
||||
Remove: v[13],
|
||||
RmDir: v[14],
|
||||
Rename: v[15],
|
||||
Link: v[16],
|
||||
ReadDir: v[17],
|
||||
ReadDirPlus: v[18],
|
||||
FsStat: v[19],
|
||||
FsInfo: v[20],
|
||||
PathConf: v[21],
|
||||
Commit: v[22],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values {
|
||||
return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
|
||||
}
|
||||
|
||||
// This function currently supports mapping 59 NFS v4 client stats. Older
|
||||
// kernels may emit fewer stats, so we must detect this and pad out the
|
||||
// values to match the expected slice size.
|
||||
if values < 59 {
|
||||
newValues := make([]uint64, 60)
|
||||
copy(newValues, v)
|
||||
v = newValues
|
||||
}
|
||||
|
||||
return ClientV4Stats{
|
||||
Null: v[1],
|
||||
Read: v[2],
|
||||
Write: v[3],
|
||||
Commit: v[4],
|
||||
Open: v[5],
|
||||
OpenConfirm: v[6],
|
||||
OpenNoattr: v[7],
|
||||
OpenDowngrade: v[8],
|
||||
Close: v[9],
|
||||
Setattr: v[10],
|
||||
FsInfo: v[11],
|
||||
Renew: v[12],
|
||||
SetClientId: v[13],
|
||||
SetClientIdConfirm: v[14],
|
||||
Lock: v[15],
|
||||
Lockt: v[16],
|
||||
Locku: v[17],
|
||||
Access: v[18],
|
||||
Getattr: v[19],
|
||||
Lookup: v[20],
|
||||
LookupRoot: v[21],
|
||||
Remove: v[22],
|
||||
Rename: v[23],
|
||||
Link: v[24],
|
||||
Symlink: v[25],
|
||||
Create: v[26],
|
||||
Pathconf: v[27],
|
||||
StatFs: v[28],
|
||||
ReadLink: v[29],
|
||||
ReadDir: v[30],
|
||||
ServerCaps: v[31],
|
||||
DelegReturn: v[32],
|
||||
GetAcl: v[33],
|
||||
SetAcl: v[34],
|
||||
FsLocations: v[35],
|
||||
ReleaseLockowner: v[36],
|
||||
Secinfo: v[37],
|
||||
FsidPresent: v[38],
|
||||
ExchangeId: v[39],
|
||||
CreateSession: v[40],
|
||||
DestroySession: v[41],
|
||||
Sequence: v[42],
|
||||
GetLeaseTime: v[43],
|
||||
ReclaimComplete: v[44],
|
||||
LayoutGet: v[45],
|
||||
GetDeviceInfo: v[46],
|
||||
LayoutCommit: v[47],
|
||||
LayoutReturn: v[48],
|
||||
SecinfoNoName: v[49],
|
||||
TestStateId: v[50],
|
||||
FreeStateId: v[51],
|
||||
GetDeviceList: v[52],
|
||||
BindConnToSession: v[53],
|
||||
DestroyClientId: v[54],
|
||||
Seek: v[55],
|
||||
Allocate: v[56],
|
||||
DeAllocate: v[57],
|
||||
LayoutStats: v[58],
|
||||
Clone: v[59],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values != 2 {
|
||||
return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
|
||||
}
|
||||
|
||||
return ServerV4Stats{
|
||||
Null: v[1],
|
||||
Compound: v[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseV4Ops(v []uint64) (V4Ops, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values < 39 {
|
||||
return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
|
||||
}
|
||||
|
||||
stats := V4Ops{
|
||||
Op0Unused: v[1],
|
||||
Op1Unused: v[2],
|
||||
Op2Future: v[3],
|
||||
Access: v[4],
|
||||
Close: v[5],
|
||||
Commit: v[6],
|
||||
Create: v[7],
|
||||
DelegPurge: v[8],
|
||||
DelegReturn: v[9],
|
||||
GetAttr: v[10],
|
||||
GetFH: v[11],
|
||||
Link: v[12],
|
||||
Lock: v[13],
|
||||
Lockt: v[14],
|
||||
Locku: v[15],
|
||||
Lookup: v[16],
|
||||
LookupRoot: v[17],
|
||||
Nverify: v[18],
|
||||
Open: v[19],
|
||||
OpenAttr: v[20],
|
||||
OpenConfirm: v[21],
|
||||
OpenDgrd: v[22],
|
||||
PutFH: v[23],
|
||||
PutPubFH: v[24],
|
||||
PutRootFH: v[25],
|
||||
Read: v[26],
|
||||
ReadDir: v[27],
|
||||
ReadLink: v[28],
|
||||
Remove: v[29],
|
||||
Rename: v[30],
|
||||
Renew: v[31],
|
||||
RestoreFH: v[32],
|
||||
SaveFH: v[33],
|
||||
SecInfo: v[34],
|
||||
SetAttr: v[35],
|
||||
Verify: v[36],
|
||||
Write: v[37],
|
||||
RelLockOwner: v[38],
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
67 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go generated vendored Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
|
||||
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
|
||||
stats := &ClientRPCStats{}
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(scanner.Text())
|
||||
// require at least <key> <value>
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("invalid NFS metric line %q", line)
|
||||
}
|
||||
|
||||
values, err := util.ParseUint64s(parts[1:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
|
||||
}
|
||||
|
||||
switch metricLine := parts[0]; metricLine {
|
||||
case "net":
|
||||
stats.Network, err = parseNetwork(values)
|
||||
case "rpc":
|
||||
stats.ClientRPC, err = parseClientRPC(values)
|
||||
case "proc2":
|
||||
stats.V2Stats, err = parseV2Stats(values)
|
||||
case "proc3":
|
||||
stats.V3Stats, err = parseV3Stats(values)
|
||||
case "proc4":
|
||||
stats.ClientV4Stats, err = parseClientV4Stats(values)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error scanning NFS file: %s", err)
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
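As a usage sketch, ParseClientRPCStats can be fed the live /proc/net/rpc/nfs file and the parsed counters read back from the ClientRPCStats fields defined above. Error handling is kept minimal and the example is illustrative only.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	f, err := os.Open("/proc/net/rpc/nfs")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	stats, err := nfs.ParseClientRPCStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rpc calls: %d, retransmissions: %d\n",
		stats.ClientRPC.RPCCount, stats.ClientRPC.Retransmissions)
}
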
89 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go generated vendored Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
|
||||
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
|
||||
stats := &ServerRPCStats{}
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(scanner.Text())
|
||||
// require at least <key> <value>
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("invalid NFSd metric line %q", line)
|
||||
}
|
||||
label := parts[0]
|
||||
|
||||
var values []uint64
|
||||
var err error
|
||||
if label == "th" {
|
||||
if len(parts) < 3 {
|
||||
return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
|
||||
}
|
||||
values, err = util.ParseUint64s(parts[1:3])
|
||||
} else {
|
||||
values, err = util.ParseUint64s(parts[1:])
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
|
||||
}
|
||||
|
||||
switch metricLine := parts[0]; metricLine {
|
||||
case "rc":
|
||||
stats.ReplyCache, err = parseReplyCache(values)
|
||||
case "fh":
|
||||
stats.FileHandles, err = parseFileHandles(values)
|
||||
case "io":
|
||||
stats.InputOutput, err = parseInputOutput(values)
|
||||
case "th":
|
||||
stats.Threads, err = parseThreads(values)
|
||||
case "ra":
|
||||
stats.ReadAheadCache, err = parseReadAheadCache(values)
|
||||
case "net":
|
||||
stats.Network, err = parseNetwork(values)
|
||||
case "rpc":
|
||||
stats.ServerRPC, err = parseServerRPC(values)
|
||||
case "proc2":
|
||||
stats.V2Stats, err = parseV2Stats(values)
|
||||
case "proc3":
|
||||
stats.V3Stats, err = parseV3Stats(values)
|
||||
case "proc4":
|
||||
stats.ServerV4Stats, err = parseServerV4Stats(values)
|
||||
case "proc4ops":
|
||||
stats.V4Ops, err = parseV4Ops(values)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error scanning NFSd file: %s", err)
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
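The following sketch parses a short, made-up /proc/net/rpc/nfsd excerpt with ParseServerRPCStats. It also illustrates the special handling of the "th" line above: only the first two numeric fields are kept and the trailing histogram values are ignored. The sample numbers are placeholders, not real measurements.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs/nfs"
)

// nfsdSample is an illustrative excerpt; real files contain more lines.
const nfsdSample = `rc 0 6 18622
fh 0 0 0 0 0
io 157286400 72864
th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
`

func main() {
	stats, err := nfs.ParseServerRPCStats(strings.NewReader(nfsdSample))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("threads: %d, reply-cache hits: %d, read bytes: %d\n",
		stats.Threads.Threads, stats.ReplyCache.Hits, stats.InputOutput.Read)
}
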
28 vendor/github.com/prometheus/procfs/proc.go generated vendored
@@ -1,6 +1,20 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -113,7 +127,7 @@ func (p Proc) CmdLine() ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
|
||||
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
|
||||
}
|
||||
|
||||
// Comm returns the command name of a process.
|
||||
@@ -192,6 +206,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
|
||||
return len(fds), nil
|
||||
}
|
||||
|
||||
// MountStats retrieves statistics and configuration for mount points in a
|
||||
// process's namespace.
|
||||
func (p Proc) MountStats() ([]*Mount, error) {
|
||||
f, err := os.Open(p.path("mountstats"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseMountStats(f)
|
||||
}
|
||||
|
||||
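A brief sketch of the newly added Proc.MountStats in use. procfs.Self is assumed to be available in this package as the usual way to obtain a Proc for the running process (it is not part of this diff), and only the mount count is printed because the Mount fields are defined elsewhere.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// procfs.Self is assumed to exist in this package (not shown in this diff).
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d mounts in this process's namespace\n", len(mounts))
}
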
func (p Proc) fileDescriptors() ([]string, error) {
|
||||
d, err := os.Open(p.path("fd"))
|
||||
if err != nil {
|
||||
|
||||
18 vendor/github.com/prometheus/procfs/proc_io.go generated vendored
@@ -1,3 +1,16 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
@@ -47,9 +60,6 @@ func (p Proc) NewIO() (ProcIO, error) {
|
||||
|
||||
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
|
||||
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
|
||||
if err != nil {
|
||||
return pio, err
|
||||
}
|
||||
|
||||
return pio, nil
|
||||
return pio, err
|
||||
}
|
||||
|
||||
51 vendor/github.com/prometheus/procfs/proc_limits.go generated vendored
@@ -1,3 +1,16 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
@@ -13,46 +26,46 @@ import (
|
||||
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
|
||||
type ProcLimits struct {
|
||||
// CPU time limit in seconds.
|
||||
CPUTime int
|
||||
CPUTime int64
|
||||
// Maximum size of files that the process may create.
|
||||
FileSize int
|
||||
FileSize int64
|
||||
// Maximum size of the process's data segment (initialized data,
|
||||
// uninitialized data, and heap).
|
||||
DataSize int
|
||||
DataSize int64
|
||||
// Maximum size of the process stack in bytes.
|
||||
StackSize int
|
||||
StackSize int64
|
||||
// Maximum size of a core file.
|
||||
CoreFileSize int
|
||||
CoreFileSize int64
|
||||
// Limit of the process's resident set in pages.
|
||||
ResidentSet int
|
||||
ResidentSet int64
|
||||
// Maximum number of processes that can be created for the real user ID of
|
||||
// the calling process.
|
||||
Processes int
|
||||
Processes int64
|
||||
// Value one greater than the maximum file descriptor number that can be
|
||||
// opened by this process.
|
||||
OpenFiles int
|
||||
OpenFiles int64
|
||||
// Maximum number of bytes of memory that may be locked into RAM.
|
||||
LockedMemory int
|
||||
LockedMemory int64
|
||||
// Maximum size of the process's virtual memory address space in bytes.
|
||||
AddressSpace int
|
||||
AddressSpace int64
|
||||
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
|
||||
// this process may establish.
|
||||
FileLocks int
|
||||
FileLocks int64
|
||||
// Limit of signals that may be queued for the real user ID of the calling
|
||||
// process.
|
||||
PendingSignals int
|
||||
PendingSignals int64
|
||||
// Limit on the number of bytes that can be allocated for POSIX message
|
||||
// queues for the real user ID of the calling process.
|
||||
MsqqueueSize int
|
||||
MsqqueueSize int64
|
||||
// Limit of the nice priority set using setpriority(2) or nice(2).
|
||||
NicePriority int
|
||||
NicePriority int64
|
||||
// Limit of the real-time priority set using sched_setscheduler(2) or
|
||||
// sched_setparam(2).
|
||||
RealtimePriority int
|
||||
RealtimePriority int64
|
||||
// Limit (in microseconds) on the amount of CPU time that a process
|
||||
// scheduled under a real-time scheduling policy may consume without making
|
||||
// a blocking system call.
|
||||
RealtimeTimeout int
|
||||
RealtimeTimeout int64
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -125,13 +138,13 @@ func (p Proc) NewLimits() (ProcLimits, error) {
|
||||
return l, s.Err()
|
||||
}
|
||||
|
||||
func parseInt(s string) (int, error) {
|
||||
func parseInt(s string) (int64, error) {
|
||||
if s == limitsUnlimited {
|
||||
return -1, nil
|
||||
}
|
||||
i, err := strconv.ParseInt(s, 10, 32)
|
||||
i, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
|
||||
}
|
||||
return int(i), nil
|
||||
return i, nil
|
||||
}
|
||||
|
||||
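As a usage sketch for the widened ProcLimits fields, the example below reads the limits of the current process and treats -1 as "unlimited", mirroring what parseInt returns above. procfs.Self is assumed to exist as the constructor for the current process.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// procfs.Self is assumed to exist in this package (not shown in this diff).
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.NewLimits()
	if err != nil {
		log.Fatal(err)
	}
	if limits.OpenFiles == -1 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Printf("open files: %d\n", limits.OpenFiles)
	}
}
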
68 vendor/github.com/prometheus/procfs/proc_ns.go generated vendored Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Namespace represents a single namespace of a process.
|
||||
type Namespace struct {
|
||||
Type string // Namespace type.
|
||||
Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
|
||||
}
|
||||
|
||||
// Namespaces contains all of the namespaces that the process is contained in.
|
||||
type Namespaces map[string]Namespace
|
||||
|
||||
// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
|
||||
// process is a member.
|
||||
func (p Proc) NewNamespaces() (Namespaces, error) {
|
||||
d, err := os.Open(p.path("ns"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
|
||||
}
|
||||
|
||||
ns := make(Namespaces, len(names))
|
||||
for _, name := range names {
|
||||
target, err := os.Readlink(p.path("ns", name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fields := strings.SplitN(target, ":", 2)
|
||||
if len(fields) != 2 {
|
||||
return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
|
||||
}
|
||||
|
||||
typ := fields[0]
|
||||
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
|
||||
}
|
||||
|
||||
ns[name] = Namespace{typ, uint32(inode)}
|
||||
}
|
||||
|
||||
return ns, nil
|
||||
}
|
||||
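A sketch of how NewNamespaces can be used: since two processes share a namespace exactly when the inodes match (as the comment on Namespace.Inode notes), comparing the "net" entries answers whether they share a network namespace. procfs.Self and procfs.NewProc are assumed to be this package's constructors, and PID 1 is only a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// procfs.Self and procfs.NewProc are assumed to exist (not in this diff).
	self, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	other, err := procfs.NewProc(1) // placeholder PID
	if err != nil {
		log.Fatal(err)
	}

	selfNS, err := self.NewNamespaces()
	if err != nil {
		log.Fatal(err)
	}
	otherNS, err := other.NewNamespaces()
	if err != nil {
		log.Fatal(err)
	}

	same := selfNS["net"].Inode == otherNS["net"].Inode
	fmt.Printf("same network namespace as PID 1: %v\n", same)
}
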
13 vendor/github.com/prometheus/procfs/proc_stat.go generated vendored
@@ -1,3 +1,16 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
|
||||
206 vendor/github.com/prometheus/procfs/stat.go generated vendored
@@ -1,17 +1,81 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CPUStat shows how much time the cpu spend in various stages.
|
||||
type CPUStat struct {
|
||||
User float64
|
||||
Nice float64
|
||||
System float64
|
||||
Idle float64
|
||||
Iowait float64
|
||||
IRQ float64
|
||||
SoftIRQ float64
|
||||
Steal float64
|
||||
Guest float64
|
||||
GuestNice float64
|
||||
}
|
||||
|
||||
// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
|
||||
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
|
||||
// It is possible to get per-cpu stats by reading /proc/softirqs
|
||||
type SoftIRQStat struct {
|
||||
Hi uint64
|
||||
Timer uint64
|
||||
NetTx uint64
|
||||
NetRx uint64
|
||||
Block uint64
|
||||
BlockIoPoll uint64
|
||||
Tasklet uint64
|
||||
Sched uint64
|
||||
Hrtimer uint64
|
||||
Rcu uint64
|
||||
}
|
||||
|
||||
// Stat represents kernel/system statistics.
|
||||
type Stat struct {
|
||||
// Boot time in seconds since the Epoch.
|
||||
BootTime int64
|
||||
BootTime uint64
|
||||
// Summed up cpu statistics.
|
||||
CPUTotal CPUStat
|
||||
// Per-CPU statistics.
|
||||
CPU []CPUStat
|
||||
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
|
||||
IRQTotal uint64
|
||||
// Number of times a numbered IRQ was triggered.
|
||||
IRQ []uint64
|
||||
// Number of times a context switch happened.
|
||||
ContextSwitches uint64
|
||||
// Number of times a process was created.
|
||||
ProcessCreated uint64
|
||||
// Number of processes currently running.
|
||||
ProcessesRunning uint64
|
||||
// Number of processes currently blocked (waiting for IO).
|
||||
ProcessesBlocked uint64
|
||||
// Number of times a softirq was scheduled.
|
||||
SoftIRQTotal uint64
|
||||
// Detailed softirq statistics.
|
||||
SoftIRQ SoftIRQStat
|
||||
}
|
||||
|
||||
// NewStat returns kernel/system statistics read from /proc/stat.
|
||||
@@ -24,33 +88,145 @@ func NewStat() (Stat, error) {
|
||||
return fs.NewStat()
|
||||
}
|
||||
|
||||
// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
|
||||
func parseCPUStat(line string) (CPUStat, int64, error) {
|
||||
cpuStat := CPUStat{}
|
||||
var cpu string
|
||||
|
||||
count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
|
||||
&cpu,
|
||||
&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
|
||||
&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
|
||||
&cpuStat.Guest, &cpuStat.GuestNice)
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
|
||||
}
|
||||
if count == 0 {
|
||||
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
|
||||
}
|
||||
|
||||
cpuStat.User /= userHZ
|
||||
cpuStat.Nice /= userHZ
|
||||
cpuStat.System /= userHZ
|
||||
cpuStat.Idle /= userHZ
|
||||
cpuStat.Iowait /= userHZ
|
||||
cpuStat.IRQ /= userHZ
|
||||
cpuStat.SoftIRQ /= userHZ
|
||||
cpuStat.Steal /= userHZ
|
||||
cpuStat.Guest /= userHZ
|
||||
cpuStat.GuestNice /= userHZ
|
||||
|
||||
if cpu == "cpu" {
|
||||
return cpuStat, -1, nil
|
||||
}
|
||||
|
||||
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
|
||||
if err != nil {
|
||||
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
|
||||
}
|
||||
|
||||
return cpuStat, cpuID, nil
|
||||
}
|
||||
|
||||
// Parse a softirq line.
|
||||
func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
|
||||
softIRQStat := SoftIRQStat{}
|
||||
var total uint64
|
||||
var prefix string
|
||||
|
||||
_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
|
||||
&prefix, &total,
|
||||
&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
|
||||
&softIRQStat.Block, &softIRQStat.BlockIoPoll,
|
||||
&softIRQStat.Tasklet, &softIRQStat.Sched,
|
||||
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
|
||||
|
||||
if err != nil {
|
||||
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
|
||||
}
|
||||
|
||||
return softIRQStat, total, nil
|
||||
}
|
||||
|
||||
// NewStat returns an information about current kernel/system statistics.
|
||||
func (fs FS) NewStat() (Stat, error) {
|
||||
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
|
||||
|
||||
f, err := os.Open(fs.Path("stat"))
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
if !strings.HasPrefix(line, "btime") {
|
||||
stat := Stat{}
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(scanner.Text())
|
||||
// require at least <key> <value>
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) != 2 {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
|
||||
switch {
|
||||
case parts[0] == "btime":
|
||||
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "intr":
|
||||
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
|
||||
}
|
||||
numberedIRQs := parts[2:]
|
||||
stat.IRQ = make([]uint64, len(numberedIRQs))
|
||||
for i, count := range numberedIRQs {
|
||||
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "ctxt":
|
||||
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "processes":
|
||||
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_running":
|
||||
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_blocked":
|
||||
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "softirq":
|
||||
softIRQStats, total, err := parseSoftIRQStat(line)
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
stat.SoftIRQTotal = total
|
||||
stat.SoftIRQ = softIRQStats
|
||||
case strings.HasPrefix(parts[0], "cpu"):
|
||||
cpuStat, cpuID, err := parseCPUStat(line)
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
if cpuID == -1 {
|
||||
stat.CPUTotal = cpuStat
|
||||
} else {
|
||||
for int64(len(stat.CPU)) <= cpuID {
|
||||
stat.CPU = append(stat.CPU, CPUStat{})
|
||||
}
|
||||
stat.CPU[cpuID] = cpuStat
|
||||
}
|
||||
}
|
||||
i, err := strconv.ParseInt(fields[1], 10, 32)
|
||||
if err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
|
||||
}
|
||||
return Stat{BootTime: i}, nil
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
|
||||
}
|
||||
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
|
||||
return stat, nil
|
||||
}
|
||||
|
||||
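A short sketch of the extended NewStat in use, printing a few of the counters parsed above. The CPUStat fields have already been divided by userHZ in parseCPUStat, so they are expressed in seconds.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	stat, err := procfs.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("boot time: %d\n", stat.BootTime)
	fmt.Printf("context switches: %d\n", stat.ContextSwitches)
	fmt.Printf("total cpu user seconds: %.2f\n", stat.CPUTotal.User)
	fmt.Printf("per-cpu entries: %d\n", len(stat.CPU))
}
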
187 vendor/github.com/prometheus/procfs/xfrm.go generated vendored Normal file
@@ -0,0 +1,187 @@
|
||||
// Copyright 2017 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// XfrmStat models the contents of /proc/net/xfrm_stat.
|
||||
type XfrmStat struct {
|
||||
// All errors which are not matched by other
|
||||
XfrmInError int
|
||||
// No buffer is left
|
||||
XfrmInBufferError int
|
||||
// Header Error
|
||||
XfrmInHdrError int
|
||||
// No state found
|
||||
// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
|
||||
XfrmInNoStates int
|
||||
// Transformation protocol specific error
|
||||
// e.g. SA Key is wrong
|
||||
XfrmInStateProtoError int
|
||||
// Transformation mode specific error
|
||||
XfrmInStateModeError int
|
||||
// Sequence error
|
||||
// e.g. sequence number is out of window
|
||||
XfrmInStateSeqError int
|
||||
// State is expired
|
||||
XfrmInStateExpired int
|
||||
// State has mismatch option
|
||||
// e.g. UDP encapsulation type is mismatched
|
||||
XfrmInStateMismatch int
|
||||
// State is invalid
|
||||
XfrmInStateInvalid int
|
||||
// No matching template for states
|
||||
// e.g. Inbound SAs are correct but SP rule is wrong
|
||||
XfrmInTmplMismatch int
|
||||
// No policy is found for states
|
||||
// e.g. Inbound SAs are correct but no SP is found
|
||||
XfrmInNoPols int
|
||||
// Policy discards
|
||||
XfrmInPolBlock int
|
||||
// Policy error
|
||||
XfrmInPolError int
|
||||
// All errors which are not matched by others
|
||||
XfrmOutError int
|
||||
// Bundle generation error
|
||||
XfrmOutBundleGenError int
|
||||
// Bundle check error
|
||||
XfrmOutBundleCheckError int
|
||||
// No state was found
|
||||
XfrmOutNoStates int
|
||||
// Transformation protocol specific error
|
||||
XfrmOutStateProtoError int
|
||||
// Transportation mode specific error
|
||||
XfrmOutStateModeError int
|
||||
// Sequence error
|
||||
// i.e sequence number overflow
|
||||
XfrmOutStateSeqError int
|
||||
// State is expired
|
||||
XfrmOutStateExpired int
|
||||
// Policy discards
|
||||
XfrmOutPolBlock int
|
||||
// Policy is dead
|
||||
XfrmOutPolDead int
|
||||
// Policy Error
|
||||
XfrmOutPolError int
|
||||
XfrmFwdHdrError int
|
||||
XfrmOutStateInvalid int
|
||||
XfrmAcquireError int
|
||||
}
|
||||
|
||||
// NewXfrmStat reads the xfrm_stat statistics.
|
||||
func NewXfrmStat() (XfrmStat, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return XfrmStat{}, err
|
||||
}
|
||||
|
||||
return fs.NewXfrmStat()
|
||||
}
|
||||
|
||||
// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
|
||||
func (fs FS) NewXfrmStat() (XfrmStat, error) {
|
||||
file, err := os.Open(fs.Path("net/xfrm_stat"))
|
||||
if err != nil {
|
||||
return XfrmStat{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var (
|
||||
x = XfrmStat{}
|
||||
s = bufio.NewScanner(file)
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
|
||||
if len(fields) != 2 {
|
||||
return XfrmStat{}, fmt.Errorf(
|
||||
"couldnt parse %s line %s", file.Name(), s.Text())
|
||||
}
|
||||
|
||||
name := fields[0]
|
||||
value, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return XfrmStat{}, err
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "XfrmInError":
|
||||
x.XfrmInError = value
|
||||
case "XfrmInBufferError":
|
||||
x.XfrmInBufferError = value
|
||||
case "XfrmInHdrError":
|
||||
x.XfrmInHdrError = value
|
||||
case "XfrmInNoStates":
|
||||
x.XfrmInNoStates = value
|
||||
case "XfrmInStateProtoError":
|
||||
x.XfrmInStateProtoError = value
|
||||
case "XfrmInStateModeError":
|
||||
x.XfrmInStateModeError = value
|
||||
case "XfrmInStateSeqError":
|
||||
x.XfrmInStateSeqError = value
|
||||
case "XfrmInStateExpired":
|
||||
x.XfrmInStateExpired = value
|
||||
case "XfrmInStateInvalid":
|
||||
x.XfrmInStateInvalid = value
|
||||
case "XfrmInTmplMismatch":
|
||||
x.XfrmInTmplMismatch = value
|
||||
case "XfrmInNoPols":
|
||||
x.XfrmInNoPols = value
|
||||
case "XfrmInPolBlock":
|
||||
x.XfrmInPolBlock = value
|
||||
case "XfrmInPolError":
|
||||
x.XfrmInPolError = value
|
||||
case "XfrmOutError":
|
||||
x.XfrmOutError = value
|
||||
case "XfrmInStateMismatch":
|
||||
x.XfrmInStateMismatch = value
|
||||
case "XfrmOutBundleGenError":
|
||||
x.XfrmOutBundleGenError = value
|
||||
case "XfrmOutBundleCheckError":
|
||||
x.XfrmOutBundleCheckError = value
|
||||
case "XfrmOutNoStates":
|
||||
x.XfrmOutNoStates = value
|
||||
case "XfrmOutStateProtoError":
|
||||
x.XfrmOutStateProtoError = value
|
||||
case "XfrmOutStateModeError":
|
||||
x.XfrmOutStateModeError = value
|
||||
case "XfrmOutStateSeqError":
|
||||
x.XfrmOutStateSeqError = value
|
||||
case "XfrmOutStateExpired":
|
||||
x.XfrmOutStateExpired = value
|
||||
case "XfrmOutPolBlock":
|
||||
x.XfrmOutPolBlock = value
|
||||
case "XfrmOutPolDead":
|
||||
x.XfrmOutPolDead = value
|
||||
case "XfrmOutPolError":
|
||||
x.XfrmOutPolError = value
|
||||
case "XfrmFwdHdrError":
|
||||
x.XfrmFwdHdrError = value
|
||||
case "XfrmOutStateInvalid":
|
||||
x.XfrmOutStateInvalid = value
|
||||
case "XfrmAcquireError":
|
||||
x.XfrmAcquireError = value
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return x, s.Err()
|
||||
}
|
||||
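A minimal sketch of NewXfrmStat in use, reading the IPsec transformation counters defined above and printing a few common error conditions.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	x, err := procfs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("XfrmInError: %d\n", x.XfrmInError)
	fmt.Printf("XfrmInNoStates: %d\n", x.XfrmInNoStates)
	fmt.Printf("XfrmOutError: %d\n", x.XfrmOutError)
}
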
330 vendor/github.com/prometheus/procfs/xfs/parse.go generated vendored Normal file
@@ -0,0 +1,330 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package xfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// ParseStats parses a Stats from an input io.Reader, using the format
|
||||
// found in /proc/fs/xfs/stat.
|
||||
func ParseStats(r io.Reader) (*Stats, error) {
|
||||
const (
|
||||
// Fields parsed into stats structures.
|
||||
fieldExtentAlloc = "extent_alloc"
|
||||
fieldAbt = "abt"
|
||||
fieldBlkMap = "blk_map"
|
||||
fieldBmbt = "bmbt"
|
||||
fieldDir = "dir"
|
||||
fieldTrans = "trans"
|
||||
fieldIg = "ig"
|
||||
fieldLog = "log"
|
||||
fieldRw = "rw"
|
||||
fieldAttr = "attr"
|
||||
fieldIcluster = "icluster"
|
||||
fieldVnodes = "vnodes"
|
||||
fieldBuf = "buf"
|
||||
fieldXpc = "xpc"
|
||||
|
||||
// Unimplemented at this time due to lack of documentation.
|
||||
fieldPushAil = "push_ail"
|
||||
fieldXstrat = "xstrat"
|
||||
fieldAbtb2 = "abtb2"
|
||||
fieldAbtc2 = "abtc2"
|
||||
fieldBmbt2 = "bmbt2"
|
||||
fieldIbt2 = "ibt2"
|
||||
fieldFibt2 = "fibt2"
|
||||
fieldQm = "qm"
|
||||
fieldDebug = "debug"
|
||||
)
|
||||
|
||||
var xfss Stats
|
||||
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Expect at least a string label and a single integer value, ex:
|
||||
// - abt 0
|
||||
// - rw 1 2
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) < 2 {
|
||||
continue
|
||||
}
|
||||
label := ss[0]
|
||||
|
||||
// Extended precision counters are uint64 values.
|
||||
if label == fieldXpc {
|
||||
us, err := util.ParseUint64s(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// All other counters are uint32 values.
|
||||
us, err := util.ParseUint32s(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch label {
|
||||
case fieldExtentAlloc:
|
||||
xfss.ExtentAllocation, err = extentAllocationStats(us)
|
||||
case fieldAbt:
|
||||
xfss.AllocationBTree, err = btreeStats(us)
|
||||
case fieldBlkMap:
|
||||
xfss.BlockMapping, err = blockMappingStats(us)
|
||||
case fieldBmbt:
|
||||
xfss.BlockMapBTree, err = btreeStats(us)
|
||||
case fieldDir:
|
||||
xfss.DirectoryOperation, err = directoryOperationStats(us)
|
||||
case fieldTrans:
|
||||
xfss.Transaction, err = transactionStats(us)
|
||||
case fieldIg:
|
||||
xfss.InodeOperation, err = inodeOperationStats(us)
|
||||
case fieldLog:
|
||||
xfss.LogOperation, err = logOperationStats(us)
|
||||
case fieldRw:
|
||||
xfss.ReadWrite, err = readWriteStats(us)
|
||||
case fieldAttr:
|
||||
xfss.AttributeOperation, err = attributeOperationStats(us)
|
||||
case fieldIcluster:
|
||||
xfss.InodeClustering, err = inodeClusteringStats(us)
|
||||
case fieldVnodes:
|
||||
xfss.Vnode, err = vnodeStats(us)
|
||||
case fieldBuf:
|
||||
xfss.Buffer, err = bufferStats(us)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &xfss, s.Err()
|
||||
}
|
||||
|
||||
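As a usage sketch, ParseStats can be pointed at the live /proc/fs/xfs/stat file and the parsed counters read back; on hosts without XFS the file simply does not exist and the open fails.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	f, err := os.Open("/proc/fs/xfs/stat")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	stats, err := xfs.ParseStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("xfs reads: %d, writes: %d\n",
		stats.ReadWrite.Read, stats.ReadWrite.Write)
}
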
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
|
||||
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
|
||||
}
|
||||
|
||||
return ExtentAllocationStats{
|
||||
ExtentsAllocated: us[0],
|
||||
BlocksAllocated: us[1],
|
||||
ExtentsFreed: us[2],
|
||||
BlocksFreed: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// btreeStats builds a BTreeStats from a slice of uint32s.
|
||||
func btreeStats(us []uint32) (BTreeStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
|
||||
}
|
||||
|
||||
return BTreeStats{
|
||||
Lookups: us[0],
|
||||
Compares: us[1],
|
||||
RecordsInserted: us[2],
|
||||
RecordsDeleted: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// blockMappingStats builds a BlockMappingStats from a slice of uint32s.
|
||||
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
|
||||
if l := len(us); l != 7 {
|
||||
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
|
||||
}
|
||||
|
||||
return BlockMappingStats{
|
||||
Reads: us[0],
|
||||
Writes: us[1],
|
||||
Unmaps: us[2],
|
||||
ExtentListInsertions: us[3],
|
||||
ExtentListDeletions: us[4],
|
||||
ExtentListLookups: us[5],
|
||||
ExtentListCompares: us[6],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
|
||||
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
|
||||
}
|
||||
|
||||
return DirectoryOperationStats{
|
||||
Lookups: us[0],
|
||||
Creates: us[1],
|
||||
Removes: us[2],
|
||||
Getdents: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TransactionStats builds a TransactionStats from a slice of uint32s.
|
||||
func transactionStats(us []uint32) (TransactionStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
|
||||
}
|
||||
|
||||
return TransactionStats{
|
||||
Sync: us[0],
|
||||
Async: us[1],
|
||||
Empty: us[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
|
||||
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
|
||||
if l := len(us); l != 7 {
|
||||
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
|
||||
}
|
||||
|
||||
return InodeOperationStats{
|
||||
Attempts: us[0],
|
||||
Found: us[1],
|
||||
Recycle: us[2],
|
||||
Missed: us[3],
|
||||
Duplicate: us[4],
|
||||
Reclaims: us[5],
|
||||
AttributeChange: us[6],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LogOperationStats builds a LogOperationStats from a slice of uint32s.
|
||||
func logOperationStats(us []uint32) (LogOperationStats, error) {
|
||||
if l := len(us); l != 5 {
|
||||
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
|
||||
}
|
||||
|
||||
return LogOperationStats{
|
||||
Writes: us[0],
|
||||
Blocks: us[1],
|
||||
NoInternalBuffers: us[2],
|
||||
Force: us[3],
|
||||
ForceSleep: us[4],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
|
||||
func readWriteStats(us []uint32) (ReadWriteStats, error) {
|
||||
if l := len(us); l != 2 {
|
||||
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
|
||||
}
|
||||
|
||||
return ReadWriteStats{
|
||||
Read: us[0],
|
||||
Write: us[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
|
||||
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
|
||||
}
|
||||
|
||||
return AttributeOperationStats{
|
||||
Get: us[0],
|
||||
Set: us[1],
|
||||
Remove: us[2],
|
||||
List: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
|
||||
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
|
||||
}
|
||||
|
||||
return InodeClusteringStats{
|
||||
Iflush: us[0],
|
||||
Flush: us[1],
|
||||
FlushInode: us[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// VnodeStats builds a VnodeStats from a slice of uint32s.
|
||||
func vnodeStats(us []uint32) (VnodeStats, error) {
|
||||
// The attribute "Free" appears to not be available on older XFS
|
||||
// stats versions. Therefore, 7 or 8 elements may appear in
|
||||
// this slice.
|
||||
l := len(us)
|
||||
if l != 7 && l != 8 {
|
||||
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
|
||||
}
|
||||
|
||||
s := VnodeStats{
|
||||
Active: us[0],
|
||||
Allocate: us[1],
|
||||
Get: us[2],
|
||||
Hold: us[3],
|
||||
Release: us[4],
|
||||
Reclaim: us[5],
|
||||
Remove: us[6],
|
||||
}
|
||||
|
||||
// Skip adding free, unless it is present. The zero value will
|
||||
// be used in place of an actual count.
|
||||
if l == 7 {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
s.Free = us[7]
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// BufferStats builds a BufferStats from a slice of uint32s.
|
||||
func bufferStats(us []uint32) (BufferStats, error) {
|
||||
if l := len(us); l != 9 {
|
||||
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
|
||||
}
|
||||
|
||||
return BufferStats{
|
||||
Get: us[0],
|
||||
Create: us[1],
|
||||
GetLocked: us[2],
|
||||
GetLockedWaited: us[3],
|
||||
BusyLocked: us[4],
|
||||
MissLocked: us[5],
|
||||
PageRetries: us[6],
|
||||
PageFound: us[7],
|
||||
GetRead: us[8],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// extendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
|
||||
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
|
||||
}
|
||||
|
||||
return ExtendedPrecisionStats{
|
||||
FlushBytes: us[0],
|
||||
WriteBytes: us[1],
|
||||
ReadBytes: us[2],
|
||||
}, nil
|
||||
}
|
||||
163 vendor/github.com/prometheus/procfs/xfs/xfs.go generated vendored Normal file
@@ -0,0 +1,163 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package xfs provides access to statistics exposed by the XFS filesystem.
package xfs

// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
    // The name of the filesystem used to source these statistics.
    // If empty, this indicates aggregated statistics for all XFS
    // filesystems on the host.
    Name string

    ExtentAllocation   ExtentAllocationStats
    AllocationBTree    BTreeStats
    BlockMapping       BlockMappingStats
    BlockMapBTree      BTreeStats
    DirectoryOperation DirectoryOperationStats
    Transaction        TransactionStats
    InodeOperation     InodeOperationStats
    LogOperation       LogOperationStats
    ReadWrite          ReadWriteStats
    AttributeOperation AttributeOperationStats
    InodeClustering    InodeClusteringStats
    Vnode              VnodeStats
    Buffer             BufferStats
    ExtendedPrecision  ExtendedPrecisionStats
}

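The Stats struct above collects per-subsystem counters parsed from space-delimited lines such as those in /proc/fs/xfs/stat. As a hedged illustration of how one such line could map onto these types, the sketch below parses a hypothetical "rw" line with the unexported readWriteStats helper from the preceding file; the sample line contents, the exampleParseRWLine name, and the assumption of running inside the xfs package are illustrative only, not part of the vendored code.

package xfs

import (
    "fmt"
    "strconv"
    "strings"
)

// exampleParseRWLine shows how a single space-delimited stats line could be
// turned into a ReadWriteStats value. The line is a made-up sample in the
// shape of an "rw" entry from /proc/fs/xfs/stat.
func exampleParseRWLine() error {
    line := "rw 1236502 1115371" // hypothetical stats line

    fields := strings.Fields(line)[1:] // drop the "rw" prefix
    us := make([]uint32, 0, len(fields))
    for _, f := range fields {
        v, err := strconv.ParseUint(f, 10, 32)
        if err != nil {
            return err
        }
        us = append(us, uint32(v))
    }

    rw, err := readWriteStats(us)
    if err != nil {
        return err
    }
    fmt.Printf("read: %d, write: %d\n", rw.Read, rw.Write)
    return nil
}
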
// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
    ExtentsAllocated uint32
    BlocksAllocated  uint32
    ExtentsFreed     uint32
    BlocksFreed      uint32
}

// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
    Lookups         uint32
    Compares        uint32
    RecordsInserted uint32
    RecordsDeleted  uint32
}

// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
    Reads                uint32
    Writes               uint32
    Unmaps               uint32
    ExtentListInsertions uint32
    ExtentListDeletions  uint32
    ExtentListLookups    uint32
    ExtentListCompares   uint32
}

// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
    Lookups  uint32
    Creates  uint32
    Removes  uint32
    Getdents uint32
}

// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
    Sync  uint32
    Async uint32
    Empty uint32
}

// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
    Attempts        uint32
    Found           uint32
    Recycle         uint32
    Missed          uint32
    Duplicate       uint32
    Reclaims        uint32
    AttributeChange uint32
}

// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
    Writes            uint32
    Blocks            uint32
    NoInternalBuffers uint32
    Force             uint32
    ForceSleep        uint32
}

// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
    Read  uint32
    Write uint32
}

// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
    Get    uint32
    Set    uint32
    Remove uint32
    List   uint32
}

// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
    Iflush     uint32
    Flush      uint32
    FlushInode uint32
}

// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
    Active   uint32
    Allocate uint32
    Get      uint32
    Hold     uint32
    Release  uint32
    Reclaim  uint32
    Remove   uint32
    Free     uint32
}

// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
    Get             uint32
    Create          uint32
    GetLocked       uint32
    GetLockedWaited uint32
    BusyLocked      uint32
    MissLocked      uint32
    PageRetries     uint32
    PageFound       uint32
    GetRead         uint32
}

// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed during XFS operations.
type ExtendedPrecisionStats struct {
    FlushBytes uint64
    WriteBytes uint64
    ReadBytes  uint64
}

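ExtendedPrecisionStats uses uint64 fields because byte totals can exceed the 32-bit range that the operation-count fields use. A small standalone sketch of the difference, with made-up values and purely for illustration, follows.

package main

import "fmt"

func main() {
    // A uint32 byte counter wraps after 2^32-1 bytes (just under 4 GiB);
    // a uint64 counter keeps counting.
    var b32 uint32 = 1<<32 - 1
    var b64 uint64 = 1<<32 - 1

    b32++ // wraps around to 0
    b64++ // 4294967296

    fmt.Println(b32, b64) // prints: 0 4294967296
}
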
11
vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// +build appengine gopherjs

package logrus

import (
    "io"
)

// checkIfTerminal always reports true under the appengine and gopherjs
// build tags above.
func checkIfTerminal(w io.Writer) bool {
    return true
}

Some files were not shown because too many files have changed in this diff.