Updating deps

Signed-off-by: Dave Henderson <dhenderson@gmail.com>
Author: Dave Henderson
Date: 2018-03-13 15:51:54 -04:00
Committed by: Martin Lindhe
Parent: bda7dd18cf
Commit: df0db7a54f
341 changed files with 76222 additions and 25054 deletions

View File

@@ -267,7 +267,12 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
now := time.Now()
delegate := &responseWriterDelegator{ResponseWriter: w}
out := computeApproximateRequestSize(r)
out := make(chan int)
urlLen := 0
if r.URL != nil {
urlLen = len(r.URL.String())
}
go computeApproximateRequestSize(r, out, urlLen)
_, cn := w.(http.CloseNotifier)
_, fl := w.(http.Flusher)
@@ -292,37 +297,23 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
})
}
func computeApproximateRequestSize(r *http.Request) <-chan int {
// Get URL length in current go routine for avoiding a race condition.
// HandlerFunc that runs in parallel may modify the URL.
s := 0
if r.URL != nil {
s += len(r.URL.String())
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
out := make(chan int, 1)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
go func() {
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
out <- s
close(out)
}()
return out
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
out <- s
}
type responseWriterDelegator struct {
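A minimal, self-contained sketch of the pattern introduced above (the names here are illustrative, not the library's): the URL length is read synchronously because the wrapped handler may mutate r.URL concurrently, and the rest of the approximate size is computed in a goroutine that reports through a channel.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func approxRequestSize(r *http.Request, out chan<- int, s int) {
	// s already carries the URL length, measured before the handler ran.
	s += len(r.Method)
	s += len(r.Proto)
	for name, values := range r.Header {
		s += len(name)
		for _, value := range values {
			s += len(value)
		}
	}
	s += len(r.Host)
	if r.ContentLength != -1 {
		s += int(r.ContentLength)
	}
	out <- s
}

func main() {
	r := httptest.NewRequest(http.MethodGet, "http://example.com/metrics", nil)
	out := make(chan int)
	urlLen := 0
	if r.URL != nil {
		urlLen = len(r.URL.String()) // taken eagerly to avoid the race
	}
	go approxRequestSize(r, out, urlLen)
	fmt.Println("approximate request size:", <-out)
}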

View File

@@ -1,13 +0,0 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.
Maintainers of this repository:
* Björn Rabenstein <beorn@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Björn Rabenstein <beorn@soundcloud.com>
* Matt T. Proud <matt.proud@gmail.com>
* Tobias Schmidt <ts@soundcloud.com>

View File

@@ -1,11 +0,0 @@
Maintainers of this repository:
* Fabian Reinartz <fabian@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Björn Rabenstein <beorn@soundcloud.com>
* Fabian Reinartz <fabian@soundcloud.com>
* Julius Volz <julius.volz@gmail.com>
* Miguel Molina <hi@mvader.me>

View File

@@ -31,6 +31,7 @@ type Decoder interface {
Decode(*dto.MetricFamily) error
}
// DecodeOptions contains options used by the Decoder and in sample extraction.
type DecodeOptions struct {
// Timestamp is added to each value from the stream that has no explicit timestamp set.
Timestamp model.Time
@@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
return nil
}
// SampleDecoder wraps a Decoder to extract samples from the metric families
// decoded by the wrapped Decoder.
type SampleDecoder struct {
Dec Decoder
Opts *DecodeOptions
@@ -149,37 +152,51 @@ type SampleDecoder struct {
f dto.MetricFamily
}
// Decode calls the Decode method of the wrapped Decoder and then extracts the
// samples from the decoded MetricFamily into the provided model.Vector.
func (sd *SampleDecoder) Decode(s *model.Vector) error {
if err := sd.Dec.Decode(&sd.f); err != nil {
err := sd.Dec.Decode(&sd.f)
if err != nil {
return err
}
*s = extractSamples(&sd.f, sd.Opts)
return nil
*s, err = extractSamples(&sd.f, sd.Opts)
return err
}
// Extract samples builds a slice of samples from the provided metric families.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
var all model.Vector
// ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
// error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
var (
all model.Vector
lastErr error
)
for _, f := range fams {
all = append(all, extractSamples(f, o)...)
some, err := extractSamples(f, o)
if err != nil {
lastErr = err
continue
}
all = append(all, some...)
}
return all
return all, lastErr
}
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
switch f.GetType() {
case dto.MetricType_COUNTER:
return extractCounter(o, f)
return extractCounter(o, f), nil
case dto.MetricType_GAUGE:
return extractGauge(o, f)
return extractGauge(o, f), nil
case dto.MetricType_SUMMARY:
return extractSummary(o, f)
return extractSummary(o, f), nil
case dto.MetricType_UNTYPED:
return extractUntyped(o, f)
return extractUntyped(o, f), nil
case dto.MetricType_HISTOGRAM:
return extractHistogram(o, f)
return extractHistogram(o, f), nil
}
panic("expfmt.extractSamples: unknown metric family type")
return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
}
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
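A hedged usage sketch of the new ExtractSamples contract: it now returns both the samples it could extract and the last error it encountered, so a caller can keep the partial result while still reporting the failure. The MetricFamily value below is purely illustrative.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	fam := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, fam)
	if err != nil {
		fmt.Println("last extraction error:", err) // partial results are still usable
	}
	for _, s := range samples {
		fmt.Println(s)
	}
}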

View File

@@ -11,27 +11,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// A package for reading and writing Prometheus metrics.
// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
// Format specifies the HTTP content type of the different wire protocols.
type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
const (
TextVersion = "0.0.4"
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
FmtText Format = `text/plain; version=` + TextVersion
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
// fmtJSON2 is hidden as it is deprecated.
fmtJSON2 Format = `application/json; version=0.0.2`
)
const (

View File

@@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
if p.readTokenAsLabelValue(); p.err != nil {
return nil
}
if !model.LabelValue(p.currentToken.String()).IsValid() {
p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
return nil
}
p.currentLabelPair.Value = proto.String(p.currentToken.String())
// Special treatment of summaries:
// - Quantile labels are special, will result in dto.Quantile later.
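A tiny sketch of the validation added above, assuming model.LabelValue.IsValid from prometheus/common, which treats a label value as valid iff it is well-formed UTF-8.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelValue("östersund").IsValid())          // true: valid UTF-8
	fmt.Println(model.LabelValue(string([]byte{0xff})).IsValid()) // false: invalid UTF-8
}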

View File

@@ -21,22 +21,22 @@ import (
"golang.org/x/sys/windows/svc/eventlog"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
)
func init() {
setEventlogFormatter = func(name string, debugAsInfo bool) error {
setEventlogFormatter = func(l logger, name string, debugAsInfo bool) error {
if name == "" {
return fmt.Errorf("missing name parameter")
}
fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
fmter, err := newEventlogger(name, debugAsInfo, l.entry.Logger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
origLogger.Errorf("can't connect logger to eventlog: %v", err)
l.Errorf("can't connect logger to eventlog: %v", err)
return err
}
origLogger.Formatter = fmter
l.entry.Logger.Formatter = fmter
return nil
}
}

View File

@@ -14,7 +14,6 @@
package log
import (
"flag"
"fmt"
"io"
"io/ioutil"
@@ -25,106 +24,46 @@ import (
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
"gopkg.in/alecthomas/kingpin.v2"
)
type levelFlag string
// String implements flag.Value.
func (f levelFlag) String() string {
return fmt.Sprintf("%q", string(f))
}
// Set implements flag.Value.
func (f levelFlag) Set(level string) error {
l, err := logrus.ParseLevel(level)
if err != nil {
return err
}
origLogger.Level = l
return nil
}
// setSyslogFormatter is nil if the target architecture does not support syslog.
var setSyslogFormatter func(string, string) error
var setSyslogFormatter func(logger, string, string) error
// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
var setEventlogFormatter func(string, bool) error
var setEventlogFormatter func(logger, string, bool) error
func setJSONFormatter() {
origLogger.Formatter = &logrus.JSONFormatter{}
}
type logFormatFlag url.URL
// String implements flag.Value.
func (f logFormatFlag) String() string {
u := url.URL(f)
return fmt.Sprintf("%q", u.String())
type loggerSettings struct {
level string
format string
}
// Set implements flag.Value.
func (f logFormatFlag) Set(format string) error {
u, err := url.Parse(format)
func (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {
err := baseLogger.SetLevel(s.level)
if err != nil {
return err
}
if u.Scheme != "logger" {
return fmt.Errorf("invalid scheme %s", u.Scheme)
}
jsonq := u.Query().Get("json")
if jsonq == "true" {
setJSONFormatter()
}
switch u.Opaque {
case "syslog":
if setSyslogFormatter == nil {
return fmt.Errorf("system does not support syslog")
}
appname := u.Query().Get("appname")
facility := u.Query().Get("local")
return setSyslogFormatter(appname, facility)
case "eventlog":
if setEventlogFormatter == nil {
return fmt.Errorf("system does not support eventlog")
}
name := u.Query().Get("name")
debugAsInfo := false
debugAsInfoRaw := u.Query().Get("debugAsInfo")
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
debugAsInfo = parsedDebugAsInfo
}
return setEventlogFormatter(name, debugAsInfo)
case "stdout":
origLogger.Out = os.Stdout
case "stderr":
origLogger.Out = os.Stderr
default:
return fmt.Errorf("unsupported logger %q", u.Opaque)
}
return nil
err = baseLogger.SetFormat(s.format)
return err
}
func init() {
AddFlags(flag.CommandLine)
}
// AddFlags adds the flags used by this package to the given FlagSet. That's
// useful if working with a custom FlagSet. The init function of this package
// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call
// flag.Parse() to make the logging flags take effect.
func AddFlags(fs *flag.FlagSet) {
fs.Var(
levelFlag(origLogger.Level.String()),
"log.level",
"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]",
)
fs.Var(
logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}),
"log.format",
`Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`,
)
// AddFlags adds the flags used by this package to the Kingpin application.
// To use the default Kingpin application, call AddFlags(kingpin.CommandLine)
func AddFlags(a *kingpin.Application) {
s := loggerSettings{}
a.Flag("log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]").
Default(origLogger.Level.String()).
StringVar(&s.level)
defaultFormat := url.URL{Scheme: "logger", Opaque: "stderr"}
a.Flag("log.format", `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`).
Default(defaultFormat.String()).
StringVar(&s.format)
a.Action(s.apply)
}
// Logger is the interface for loggers used in the Prometheus components.
@@ -150,6 +89,9 @@ type Logger interface {
Fatalf(string, ...interface{})
With(key string, value interface{}) Logger
SetFormat(string) error
SetLevel(string) error
}
type logger struct {
@@ -235,6 +177,58 @@ func (l logger) Fatalf(format string, args ...interface{}) {
l.sourced().Fatalf(format, args...)
}
func (l logger) SetLevel(level string) error {
lvl, err := logrus.ParseLevel(level)
if err != nil {
return err
}
l.entry.Logger.Level = lvl
return nil
}
func (l logger) SetFormat(format string) error {
u, err := url.Parse(format)
if err != nil {
return err
}
if u.Scheme != "logger" {
return fmt.Errorf("invalid scheme %s", u.Scheme)
}
jsonq := u.Query().Get("json")
if jsonq == "true" {
setJSONFormatter()
}
switch u.Opaque {
case "syslog":
if setSyslogFormatter == nil {
return fmt.Errorf("system does not support syslog")
}
appname := u.Query().Get("appname")
facility := u.Query().Get("local")
return setSyslogFormatter(l, appname, facility)
case "eventlog":
if setEventlogFormatter == nil {
return fmt.Errorf("system does not support eventlog")
}
name := u.Query().Get("name")
debugAsInfo := false
debugAsInfoRaw := u.Query().Get("debugAsInfo")
if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
debugAsInfo = parsedDebugAsInfo
}
return setEventlogFormatter(l, name, debugAsInfo)
case "stdout":
l.entry.Logger.Out = os.Stdout
case "stderr":
l.entry.Logger.Out = os.Stderr
default:
return fmt.Errorf("unsupported logger %q", u.Opaque)
}
return nil
}
// sourced adds a source field to the logger that contains
// the file name and line where the logging happened.
func (l logger) sourced() *logrus.Entry {
@@ -351,6 +345,11 @@ func Fatalf(format string, args ...interface{}) {
baseLogger.sourced().Fatalf(format, args...)
}
// AddHook adds hook to Prometheus' original logger.
func AddHook(hook logrus.Hook) {
origLogger.Hooks.Add(hook)
}
type errorLogWriter struct{}
func (errorLogWriter) Write(b []byte) (int, error) {
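A hedged usage sketch of the kingpin-based AddFlags shown above: an application registers --log.level and --log.format on its own kingpin.Application, and the settings are applied during Parse via the Action hook.

package main

import (
	"os"

	"github.com/prometheus/common/log"
	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("example", "demo of the prometheus/common/log flags")
	log.AddFlags(app)
	kingpin.MustParse(app.Parse(os.Args[1:]))

	log.Infoln("logger configured via --log.level and --log.format")
}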

View File

@@ -20,11 +20,13 @@ import (
"log/syslog"
"os"
"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
)
var _ logrus.Formatter = (*syslogger)(nil)
func init() {
setSyslogFormatter = func(appname, local string) error {
setSyslogFormatter = func(l logger, appname, local string) error {
if appname == "" {
return fmt.Errorf("missing appname parameter")
}
@@ -32,18 +34,18 @@ func init() {
return fmt.Errorf("missing local parameter")
}
fmter, err := newSyslogger(appname, local, origLogger.Formatter)
fmter, err := newSyslogger(appname, local, l.entry.Logger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
origLogger.Errorf("can't connect logger to syslog: %v", err)
l.entry.Errorf("can't connect logger to syslog: %v", err)
return err
}
origLogger.Formatter = fmter
l.entry.Logger.Formatter = fmter
return nil
}
}
var ceeTag = []byte("@cee:")
var prefixTag []byte
type syslogger struct {
wrap logrus.Formatter
@@ -56,6 +58,11 @@ func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*sys
return nil, err
}
out, err := syslog.New(priority, appname)
_, isJSON := fmter.(*logrus.JSONFormatter)
if isJSON {
// add cee tag to json formatted syslogs
prefixTag = []byte("@cee:")
}
return &syslogger{
out: out,
wrap: fmter,
@@ -92,7 +99,7 @@ func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) {
}
// only append tag to data sent to syslog (line), not to what
// is returned
line := string(append(ceeTag, data...))
line := string(append(prefixTag, data...))
switch e.Level {
case logrus.PanicLevel:

View File

@@ -80,14 +80,18 @@ const (
QuantileLabel = "quantile"
)
// LabelNameRE is a regular expression matching valid label names.
// LabelNameRE is a regular expression matching valid label names. Note that the
// IsValid method of LabelName performs the same check but faster than a match
// with this regular expression.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string
// IsValid is true iff the label name matches the pattern of LabelNameRE.
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
// method, however, does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
@@ -106,7 +110,7 @@ func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal(&s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
@@ -119,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
if err := json.Unmarshal(b, &s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
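A small sketch contrasting the two validation paths mentioned in the comments above (both assumed to live in the prometheus/common model package, as in this vendored tree): LabelName.IsValid performs the same check as LabelNameRE without invoking the regexp engine.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []string{"http_requests_total", "0bad_name", "job"} {
		fmt.Printf("%-20s IsValid=%-5v RE=%v\n", name,
			model.LabelName(name).IsValid(),
			model.LabelNameRE.MatchString(name))
	}
}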

View File

@@ -160,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
// LabelName as a string and does not call its UnmarshalJSON method.
// Thus, we have to replicate the behavior here.
for ln := range m {
if !LabelNameRE.MatchString(string(ln)) {
if !ln.IsValid() {
return fmt.Errorf("%q is not a valid label name", ln)
}
}

View File

@@ -21,8 +21,11 @@ import (
)
var (
separator = []byte{0}
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
separator = []byte{0}
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
@@ -41,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
// Clone returns a copy of the Metric.
func (m Metric) Clone() Metric {
clone := Metric{}
clone := make(Metric, len(m))
for k, v := range m {
clone[k] = v
}
@@ -85,6 +88,8 @@ func (m Metric) FastFingerprint() Fingerprint {
}
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
func IsValidMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
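A short sketch of the widened metric-name pattern: names may now begin with a colon (recording-rule style), and IsValidMetricName agrees with the updated MetricNameRE.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.IsValidMetricName("job:http_requests:rate5m")) // true
	fmt.Println(model.IsValidMetricName("2xx_responses"))            // false: cannot start with a digit
}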

View File

@@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration
// Set implements pflag/flag.Value
func (d *Duration) Set(s string) error {
var err error
*d, err = ParseDuration(s)
return err
}
// Type implements pflag.Value
func (d *Duration) Type() string {
return "duration"
}
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
// StringToDuration parses a string into a time.Duration, assuming that a year
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
matches := durationRE.FindStringSubmatch(durationStr)
@@ -202,6 +214,9 @@ func (d Duration) String() string {
ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms"
)
if ms == 0 {
return "0s"
}
factors := map[string]int64{
"y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7,

View File

@@ -22,6 +22,22 @@ import (
"strings"
)
var (
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
ZeroSamplePair = SamplePair{Timestamp: Earliest}
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
// of 0, which is possible to appear in a real Sample and thus not suitable
// to signal a non-existing Sample.
ZeroSample = Sample{Timestamp: Earliest}
)
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64
@@ -113,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
if s.Value.Equal(o.Value) {
return false
}
return true
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
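A minimal sketch of the new sentinel values: ZeroSamplePair and ZeroSample use the Earliest timestamp, so they cannot be mistaken for a real sample whose timestamp happens to be 0.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// lookup stands in for any API that may or may not find a sample.
func lookup(found bool) model.SamplePair {
	if !found {
		return model.ZeroSamplePair // "does not exist" sentinel
	}
	return model.SamplePair{Timestamp: 0, Value: 1} // a legitimate sample at t=0
}

func main() {
	fmt.Println(lookup(false) == model.ZeroSamplePair) // true
	fmt.Println(lookup(true) == model.ZeroSamplePair)  // false
}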

View File

@@ -1,20 +0,0 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.
Maintainers of this repository:
* Tobias Schmidt <ts@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Armen Baghumian <abaghumian@noggin.com.au>
* Bjoern Rabenstein <beorn@soundcloud.com>
* David Cournapeau <cournape@gmail.com>
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
* Jonas Große Sundrup <cherti@letopolis.de>
* Julius Volz <julius.volz@gmail.com>
* Matthias Rampke <mr@soundcloud.com>
* Nicky Gerritsen <nicky@streamone.nl>
* Rémi Audebert <contact@halfr.net>
* Tobias Schmidt <tobidt@gmail.com>

vendor/github.com/prometheus/procfs/buddyinfo.go (generated, vendored, new file, 95 lines)
View File

@@ -0,0 +1,95 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// A BuddyInfo is the details parsed from /proc/buddyinfo.
// The data is comprised of an array of free fragments of each size.
// The sizes are 2^n*PAGE_SIZE, where n is the array index.
type BuddyInfo struct {
Node string
Zone string
Sizes []float64
}
// NewBuddyInfo reads the buddyinfo statistics.
func NewBuddyInfo() ([]BuddyInfo, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewBuddyInfo()
}
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.Path("buddyinfo"))
if err != nil {
return nil, err
}
defer file.Close()
return parseBuddyInfo(file)
}
func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
var (
buddyInfo = []BuddyInfo{}
scanner = bufio.NewScanner(r)
bucketCount = -1
)
for scanner.Scan() {
var err error
line := scanner.Text()
parts := strings.Fields(line)
if len(parts) < 4 {
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
}
node := strings.TrimRight(parts[1], ",")
zone := strings.TrimRight(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
bucketCount = arraySize
} else {
if bucketCount != arraySize {
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
}
}
sizes := make([]float64, arraySize)
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
}
}
buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
}
return buddyInfo, scanner.Err()
}
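A short usage sketch of the new buddyinfo API (requires a readable /proc on the host):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	infos, err := procfs.NewBuddyInfo()
	if err != nil {
		fmt.Println("could not read buddyinfo:", err)
		return
	}
	for _, bi := range infos {
		fmt.Printf("node %s zone %-8s free fragments per order: %v\n", bi.Node, bi.Zone, bi.Sizes)
	}
}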

View File

@@ -1 +0,0 @@
/usr/bin/vim

View File

@@ -1 +0,0 @@
../../symlinktargets/abc

View File

@@ -1 +0,0 @@
../../symlinktargets/def

View File

@@ -1 +0,0 @@
../../symlinktargets/xyz

View File

@@ -1 +0,0 @@
../../symlinktargets/ghi

View File

@@ -1 +0,0 @@
../../symlinktargets/uvw

View File

@@ -1 +0,0 @@
../../symlinktargets/abc

View File

@@ -1 +0,0 @@
../../symlinktargets/def

View File

@@ -1 +0,0 @@
../../symlinktargets/ghi

View File

@@ -1 +0,0 @@
../../symlinktargets/uvw

View File

@@ -1 +0,0 @@
../../symlinktargets/xyz

View File

@@ -1 +0,0 @@
26231

View File

@@ -1,9 +1,25 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"os"
"path"
"github.com/prometheus/procfs/nfs"
"github.com/prometheus/procfs/xfs"
)
// FS represents the pseudo-filesystem proc, which provides an interface to
@@ -31,3 +47,36 @@ func NewFS(mountPoint string) (FS, error) {
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
// XFSStats retrieves XFS filesystem runtime statistics.
func (fs FS) XFSStats() (*xfs.Stats, error) {
f, err := os.Open(fs.Path("fs/xfs/stat"))
if err != nil {
return nil, err
}
defer f.Close()
return xfs.ParseStats(f)
}
// NFSClientRPCStats retrieves NFS client RPC statistics.
func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfs"))
if err != nil {
return nil, err
}
defer f.Close()
return nfs.ParseClientRPCStats(f)
}
// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
f, err := os.Open(fs.Path("net/rpc/nfsd"))
if err != nil {
return nil, err
}
defer f.Close()
return nfs.ParseServerRPCStats(f)
}
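A minimal usage sketch of the new FS helpers, assuming a standard /proc mount; the ClientRPC field used below comes from the nfs package added later in this change.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	if xstats, err := fs.XFSStats(); err == nil {
		fmt.Printf("parsed XFS stats: %+v\n", xstats)
	}
	if rpc, err := fs.NFSClientRPCStats(); err == nil {
		fmt.Println("NFS client RPC calls:", rpc.ClientRPC.RPCCount)
	}
}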

View File

@@ -0,0 +1,46 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import "strconv"
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
us := make([]uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, u)
}
return us, nil
}
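A self-contained sketch of how these helpers are used by the procfs parsers; the util package appears to live under an internal/ path, so ParseUint64s is re-declared locally here purely for the demonstration.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUint64s mirrors util.ParseUint64s above.
func parseUint64s(ss []string) ([]uint64, error) {
	us := make([]uint64, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}
		us = append(us, u)
	}
	return us, nil
}

func main() {
	vals, err := parseUint64s(strings.Fields("4 8 15 16 23 42"))
	fmt.Println(vals, err) // [4 8 15 16 23 42] <nil>
}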

View File

@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
@@ -31,14 +44,16 @@ type IPVSStats struct {
type IPVSBackendStatus struct {
// The local (virtual) IP address.
LocalAddress net.IP
// The local (virtual) port.
LocalPort uint16
// The transport protocol (TCP, UDP).
Proto string
// The remote (real) IP address.
RemoteAddress net.IP
// The local (virtual) port.
LocalPort uint16
// The remote (real) port.
RemotePort uint16
// The local firewall mark
LocalMark string
// The transport protocol (TCP, UDP).
Proto string
// The current number of active connections for this virtual/real address pair.
ActiveConn uint64
// The current number of inactive connections for this virtual/real address pair.
@@ -142,13 +157,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status []IPVSBackendStatus
scanner = bufio.NewScanner(file)
proto string
localMark string
localAddress net.IP
localPort uint16
err error
)
for scanner.Scan() {
fields := strings.Fields(string(scanner.Text()))
fields := strings.Fields(scanner.Text())
if len(fields) == 0 {
continue
}
@@ -160,10 +176,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
continue
}
proto = fields[0]
localMark = ""
localAddress, localPort, err = parseIPPort(fields[1])
if err != nil {
return nil, err
}
case fields[0] == "FWM":
if len(fields) < 2 {
continue
}
proto = fields[0]
localMark = fields[1]
localAddress = nil
localPort = 0
case fields[0] == "->":
if len(fields) < 6 {
continue
@@ -187,6 +212,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status = append(status, IPVSBackendStatus{
LocalAddress: localAddress,
LocalPort: localPort,
LocalMark: localMark,
RemoteAddress: remoteAddress,
RemotePort: remotePort,
Proto: proto,
@@ -200,22 +226,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
}
func parseIPPort(s string) (net.IP, uint16, error) {
tmp := strings.SplitN(s, ":", 2)
var (
ip net.IP
err error
)
if len(tmp) != 2 {
return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
switch len(s) {
case 13:
ip, err = hex.DecodeString(s[0:8])
if err != nil {
return nil, 0, err
}
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
}
default:
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
}
if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
}
ip, err := hex.DecodeString(tmp[0])
if err != nil {
return nil, 0, err
}
port, err := strconv.ParseUint(tmp[1], 16, 16)
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
return nil, 0, err
}
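A self-contained sketch of the two address encodings the rewritten parseIPPort handles: a 13-character hex IPv4 entry ("AABBCCDD:PPPP") and a 46-character bracketed IPv6 entry, with the port always being the trailing four hex digits.

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	s := "C0A80016:0050" // as printed in /proc/net/ip_vs: 192.168.0.22, port 80
	ipBytes, _ := hex.DecodeString(s[0:8])
	port, _ := strconv.ParseUint(s[len(s)-4:], 16, 16)
	fmt.Println(net.IP(ipBytes), port) // 192.168.0.22 80
}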

View File

@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (

vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored, new file, 569 lines)
View File

@@ -0,0 +1,569 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
// heavily as a reference:
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
//
// Special thanks to Chris Siebenmann for all of his posts explaining the
// various statistics available for NFS.
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// Constants shared between multiple functions.
const (
deviceEntryLen = 8
fieldBytesLen = 8
fieldEventsLen = 27
statVersion10 = "1.0"
statVersion11 = "1.1"
fieldTransport10Len = 10
fieldTransport11Len = 13
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
type Mount struct {
// Name of the device.
Device string
// The mount point of the device.
Mount string
// The filesystem type used by the device.
Type string
// If available additional statistics related to this Mount.
// Use a type assertion to determine if additional statistics are available.
Stats MountStats
}
// A MountStats is a type which contains detailed statistics for a specific
// type of Mount.
type MountStats interface {
mountStats()
}
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
type MountStatsNFS struct {
// The version of statistics provided.
StatVersion string
// The age of the NFS mount.
Age time.Duration
// Statistics related to byte counters for various operations.
Bytes NFSBytesStats
// Statistics related to various NFS event occurrences.
Events NFSEventsStats
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
Transport NFSTransportStats
}
// mountStats implements MountStats.
func (m MountStatsNFS) mountStats() {}
// A NFSBytesStats contains statistics about the number of bytes read and written
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
// Number of bytes read using the read() syscall.
Read uint64
// Number of bytes written using the write() syscall.
Write uint64
// Number of bytes read using the read() syscall in O_DIRECT mode.
DirectRead uint64
// Number of bytes written using the write() syscall in O_DIRECT mode.
DirectWrite uint64
// Number of bytes read from the NFS server, in total.
ReadTotal uint64
// Number of bytes written to the NFS server, in total.
WriteTotal uint64
// Number of pages read directly via mmap()'d files.
ReadPages uint64
// Number of pages written directly via mmap()'d files.
WritePages uint64
}
// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
// Number of times cached inode attributes are re-validated from the server.
InodeRevalidate uint64
// Number of times cached dentry nodes are re-validated from the server.
DnodeRevalidate uint64
// Number of times an inode cache is cleared.
DataInvalidate uint64
// Number of times cached inode attributes are invalidated.
AttributeInvalidate uint64
// Number of times files or directories have been open()'d.
VFSOpen uint64
// Number of times a directory lookup has occurred.
VFSLookup uint64
// Number of times permissions have been checked.
VFSAccess uint64
// Number of updates (and potential writes) to pages.
VFSUpdatePage uint64
// Number of pages read directly via mmap()'d files.
VFSReadPage uint64
// Number of times a group of pages have been read.
VFSReadPages uint64
// Number of pages written directly via mmap()'d files.
VFSWritePage uint64
// Number of times a group of pages have been written.
VFSWritePages uint64
// Number of times directory entries have been read with getdents().
VFSGetdents uint64
// Number of times attributes have been set on inodes.
VFSSetattr uint64
// Number of pending writes that have been forcefully flushed to the server.
VFSFlush uint64
// Number of times fsync() has been called on directories and files.
VFSFsync uint64
// Number of times locking has been attempted on a file.
VFSLock uint64
// Number of times files have been closed and released.
VFSFileRelease uint64
// Unknown. Possibly unused.
CongestionWait uint64
// Number of times files have been truncated.
Truncation uint64
// Number of times a file has been grown due to writes beyond its existing end.
WriteExtension uint64
// Number of times a file was removed while still open by another process.
SillyRename uint64
// Number of times the NFS server gave less data than expected while reading.
ShortRead uint64
// Number of times the NFS server wrote less data than expected while writing.
ShortWrite uint64
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
// offline storage.
JukeboxDelay uint64
// Number of NFS v4.1+ pNFS reads.
PNFSRead uint64
// Number of NFS v4.1+ pNFS writes.
PNFSWrite uint64
}
// A NFSOperationStats contains statistics for a single operation.
type NFSOperationStats struct {
// The name of the operation.
Operation string
// Number of requests performed for this operation.
Requests uint64
// Number of times an actual RPC request has been transmitted for this operation.
Transmissions uint64
// Number of times a request has had a major timeout.
MajorTimeouts uint64
// Number of bytes sent for this operation, including RPC headers and payload.
BytesSent uint64
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived uint64
// Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration
// Duration it took to get a reply back after the request was transmitted.
CumulativeTotalResponseTime time.Duration
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestTime time.Duration
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses.
type NFSTransportStats struct {
// The local port used for the NFS mount.
Port uint64
// Number of times the client has had to establish a connection from scratch
// to the NFS server.
Bind uint64
// Number of times the client has made a TCP connection to the NFS server.
Connect uint64
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
// spent waiting for connections to the server to be established.
ConnectIdleTime uint64
// Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration
// Number of RPC requests for this mount sent to the NFS server.
Sends uint64
// Number of RPC responses for this mount received from the NFS server.
Receives uint64
// Number of times the NFS server sent a response with a transaction ID
// unknown to this client.
BadTransactionIDs uint64
// A running counter, incremented on each request as the current difference
// between sends and receives.
CumulativeActiveRequests uint64
// A running counter, incremented on each request by the current backlog
// queue size.
CumulativeBacklog uint64
// Stats below only available with stat version 1.1.
// Maximum number of simultaneously active RPC requests ever used.
MaximumRPCSlotsUsed uint64
// A running counter, incremented on each request as the current size of the
// sending queue.
CumulativeSendingQueue uint64
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
// of Mount structures containing detailed information about each mount.
// If available, statistics for each mount are parsed as well.
func parseMountStats(r io.Reader) ([]*Mount, error) {
const (
device = "device"
statVersionPrefix = "statvers="
nfs3Type = "nfs"
nfs4Type = "nfs4"
)
var mounts []*Mount
s := bufio.NewScanner(r)
for s.Scan() {
// Only look for device entries in this function
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 || ss[0] != device {
continue
}
m, err := parseMount(ss)
if err != nil {
return nil, err
}
// Does this mount also possess statistics information?
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
stats, err := parseMountStatsNFS(s, statVersion)
if err != nil {
return nil, err
}
m.Stats = stats
}
mounts = append(mounts, m)
}
return mounts, s.Err()
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
// Check for specific words appearing at specific indices to ensure
// the format is consistent with what we expect
format := []struct {
i int
s string
}{
{i: 0, s: "device"},
{i: 2, s: "mounted"},
{i: 3, s: "on"},
{i: 5, s: "with"},
{i: 6, s: "fstype"},
}
for _, f := range format {
if ss[f.i] != f.s {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
}
return &Mount{
Device: ss[1],
Mount: ss[4],
Type: ss[7],
}, nil
}
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
// related to NFS statistics.
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data
const (
fieldAge = "age:"
fieldBytes = "bytes:"
fieldEvents = "events:"
fieldPerOpStats = "per-op"
fieldTransport = "xprt:"
)
stats := &MountStatsNFS{
StatVersion: statVersion,
}
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
break
}
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
switch ss[0] {
case fieldAge:
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
return nil, err
}
stats.Age = d
case fieldBytes:
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
}
stats.Bytes = *bstats
case fieldEvents:
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err
}
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
}
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
if err != nil {
return nil, err
}
stats.Transport = *tstats
}
// When encountering "per-operation statistics", we must break this
// loop and parse them separately to ensure we can terminate parsing
// before reaching another device entry; hence why this 'if' statement
// is not just another switch case
if ss[0] == fieldPerOpStats {
break
}
}
if err := s.Err(); err != nil {
return nil, err
}
// NFS per-operation stats appear last before the next device entry
perOpStats, err := parseNFSOperationStats(s)
if err != nil {
return nil, err
}
stats.Operations = perOpStats
return stats, nil
}
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
}
ns := make([]uint64, 0, fieldBytesLen)
for _, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSBytesStats{
Read: ns[0],
Write: ns[1],
DirectRead: ns[2],
DirectWrite: ns[3],
ReadTotal: ns[4],
WriteTotal: ns[5],
ReadPages: ns[6],
WritePages: ns[7],
}, nil
}
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
}
ns := make([]uint64, 0, fieldEventsLen)
for _, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSEventsStats{
InodeRevalidate: ns[0],
DnodeRevalidate: ns[1],
DataInvalidate: ns[2],
AttributeInvalidate: ns[3],
VFSOpen: ns[4],
VFSLookup: ns[5],
VFSAccess: ns[6],
VFSUpdatePage: ns[7],
VFSReadPage: ns[8],
VFSReadPages: ns[9],
VFSWritePage: ns[10],
VFSWritePages: ns[11],
VFSGetdents: ns[12],
VFSSetattr: ns[13],
VFSFlush: ns[14],
VFSFsync: ns[15],
VFSLock: ns[16],
VFSFileRelease: ns[17],
CongestionWait: ns[18],
Truncation: ns[19],
WriteExtension: ns[20],
SillyRename: ns[21],
ShortRead: ns[22],
ShortWrite: ns[23],
JukeboxDelay: ns[24],
PNFSRead: ns[25],
PNFSWrite: ns[26],
}, nil
}
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
// additional information about per-operation statistics until an empty
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
const (
// Number of expected fields in each per-operation statistics set
numFields = 9
)
var ops []NFSOperationStats
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
// Must break when reading a blank line after per-operation stats to
// enable top-level function to parse the next device entry
break
}
if len(ss) != numFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
}
// Skip string operation name for integers
ns := make([]uint64, 0, numFields-1)
for _, st := range ss[1:] {
n, err := strconv.ParseUint(st, 10, 64)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
ops = append(ops, NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
MajorTimeouts: ns[2],
BytesSent: ns[3],
BytesReceived: ns[4],
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
})
}
return ops, s.Err()
}
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
switch statVersion {
case statVersion10:
if len(ss) != fieldTransport10Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
}
case statVersion11:
if len(ss) != fieldTransport11Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
}
default:
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
// in a v1.0 response.
//
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571.
ns := make([]uint64, fieldTransport11Len)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
ns[i] = n
}
return &NFSTransportStats{
Port: ns[0],
Bind: ns[1],
Connect: ns[2],
ConnectIdleTime: ns[3],
IdleTime: time.Duration(ns[4]) * time.Second,
Sends: ns[5],
Receives: ns[6],
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
}, nil
}
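A hedged sketch of consuming these types; it assumes the exported Proc.MountStats accessor that procfs provides elsewhere in the package (not part of this hunk).

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		// Stats is an interface; NFS mounts carry *MountStatsNFS.
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s on %s: age=%s, %d bytes read in total\n",
				m.Device, m.Mount, nfs.Age, nfs.Bytes.ReadTotal)
		}
	}
}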

vendor/github.com/prometheus/procfs/net_dev.go (generated, vendored, new file, 216 lines)
View File

@@ -0,0 +1,216 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"errors"
"os"
"sort"
"strconv"
"strings"
)
// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
type NetDevLine struct {
Name string `json:"name"` // The name of the interface.
RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
}
// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
// are interface names.
type NetDev map[string]NetDevLine
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
func NewNetDev() (NetDev, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetDev()
}
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
func (fs FS) NewNetDev() (NetDev, error) {
return newNetDev(fs.Path("net/dev"))
}
// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
func (p Proc) NewNetDev() (NetDev, error) {
return newNetDev(p.path("net/dev"))
}
// newNetDev creates a new NetDev from the contents of the given file.
func newNetDev(file string) (NetDev, error) {
f, err := os.Open(file)
if err != nil {
return NetDev{}, err
}
defer f.Close()
nd := NetDev{}
s := bufio.NewScanner(f)
for n := 0; s.Scan(); n++ {
// Skip the 2 header lines.
if n < 2 {
continue
}
line, err := nd.parseLine(s.Text())
if err != nil {
return nd, err
}
nd[line.Name] = *line
}
return nd, s.Err()
}
// parseLine parses a single line from the /proc/net/dev file. Header lines
// must be filtered prior to calling this method.
func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
parts := strings.SplitN(rawLine, ":", 2)
if len(parts) != 2 {
return nil, errors.New("invalid net/dev line, missing colon")
}
fields := strings.Fields(strings.TrimSpace(parts[1]))
var err error
line := &NetDevLine{}
// Interface Name
line.Name = strings.TrimSpace(parts[0])
if line.Name == "" {
return nil, errors.New("invalid net/dev line, empty interface name")
}
// RX
line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return nil, err
}
line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
if err != nil {
return nil, err
}
line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
if err != nil {
return nil, err
}
line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
if err != nil {
return nil, err
}
line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
if err != nil {
return nil, err
}
line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
if err != nil {
return nil, err
}
// TX
line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
if err != nil {
return nil, err
}
line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
if err != nil {
return nil, err
}
line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
if err != nil {
return nil, err
}
line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
if err != nil {
return nil, err
}
line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
if err != nil {
return nil, err
}
line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
if err != nil {
return nil, err
}
line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
if err != nil {
return nil, err
}
line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
if err != nil {
return nil, err
}
return line, nil
}
// Total aggregates the values across interfaces and returns a new NetDevLine.
// The Name field will be a sorted comma-separated list of interface names.
func (nd NetDev) Total() NetDevLine {
total := NetDevLine{}
names := make([]string, 0, len(nd))
for _, ifc := range nd {
names = append(names, ifc.Name)
total.RxBytes += ifc.RxBytes
total.RxPackets += ifc.RxPackets
total.RxErrors += ifc.RxErrors
total.RxDropped += ifc.RxDropped
total.RxFIFO += ifc.RxFIFO
total.RxFrame += ifc.RxFrame
total.RxCompressed += ifc.RxCompressed
total.RxMulticast += ifc.RxMulticast
total.TxBytes += ifc.TxBytes
total.TxPackets += ifc.TxPackets
total.TxErrors += ifc.TxErrors
total.TxDropped += ifc.TxDropped
total.TxFIFO += ifc.TxFIFO
total.TxCollisions += ifc.TxCollisions
total.TxCarrier += ifc.TxCarrier
total.TxCompressed += ifc.TxCompressed
}
sort.Strings(names)
total.Name = strings.Join(names, ", ")
return total
}
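A short usage sketch of the new NetDev API: read /proc/net/dev and print the totals aggregated across interfaces.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	nd, err := procfs.NewNetDev()
	if err != nil {
		panic(err)
	}
	total := nd.Total()
	fmt.Printf("interfaces=%q rx=%d bytes tx=%d bytes\n", total.Name, total.RxBytes, total.TxBytes)
}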

vendor/github.com/prometheus/procfs/nfs/nfs.go (generated, vendored, new file, 263 lines)
View File

@@ -0,0 +1,263 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package nfs implements parsing of /proc/net/rpc/nfsd and /proc/net/rpc/nfs.
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
package nfs
// ReplyCache models the "rc" line.
type ReplyCache struct {
Hits uint64
Misses uint64
NoCache uint64
}
// FileHandles models the "fh" line.
type FileHandles struct {
Stale uint64
TotalLookups uint64
AnonLookups uint64
DirNoCache uint64
NoDirNoCache uint64
}
// InputOutput models the "io" line.
type InputOutput struct {
Read uint64
Write uint64
}
// Threads models the "th" line.
type Threads struct {
Threads uint64
FullCnt uint64
}
// ReadAheadCache models the "ra" line.
type ReadAheadCache struct {
CacheSize uint64
CacheHistogram []uint64
NotFound uint64
}
// Network models the "net" line.
type Network struct {
NetCount uint64
UDPCount uint64
TCPCount uint64
TCPConnect uint64
}
// ClientRPC models the nfs "rpc" line.
type ClientRPC struct {
RPCCount uint64
Retransmissions uint64
AuthRefreshes uint64
}
// ServerRPC models the nfsd "rpc" line.
type ServerRPC struct {
RPCCount uint64
BadCnt uint64
BadFmt uint64
BadAuth uint64
BadcInt uint64
}
// V2Stats models the "proc2" line.
type V2Stats struct {
Null uint64
GetAttr uint64
SetAttr uint64
Root uint64
Lookup uint64
ReadLink uint64
Read uint64
WrCache uint64
Write uint64
Create uint64
Remove uint64
Rename uint64
Link uint64
SymLink uint64
MkDir uint64
RmDir uint64
ReadDir uint64
FsStat uint64
}
// V3Stats models the "proc3" line.
type V3Stats struct {
Null uint64
GetAttr uint64
SetAttr uint64
Lookup uint64
Access uint64
ReadLink uint64
Read uint64
Write uint64
Create uint64
MkDir uint64
SymLink uint64
MkNod uint64
Remove uint64
RmDir uint64
Rename uint64
Link uint64
ReadDir uint64
ReadDirPlus uint64
FsStat uint64
FsInfo uint64
PathConf uint64
Commit uint64
}
// ClientV4Stats models the nfs "proc4" line.
type ClientV4Stats struct {
Null uint64
Read uint64
Write uint64
Commit uint64
Open uint64
OpenConfirm uint64
OpenNoattr uint64
OpenDowngrade uint64
Close uint64
Setattr uint64
FsInfo uint64
Renew uint64
SetClientId uint64
SetClientIdConfirm uint64
Lock uint64
Lockt uint64
Locku uint64
Access uint64
Getattr uint64
Lookup uint64
LookupRoot uint64
Remove uint64
Rename uint64
Link uint64
Symlink uint64
Create uint64
Pathconf uint64
StatFs uint64
ReadLink uint64
ReadDir uint64
ServerCaps uint64
DelegReturn uint64
GetAcl uint64
SetAcl uint64
FsLocations uint64
ReleaseLockowner uint64
Secinfo uint64
FsidPresent uint64
ExchangeId uint64
CreateSession uint64
DestroySession uint64
Sequence uint64
GetLeaseTime uint64
ReclaimComplete uint64
LayoutGet uint64
GetDeviceInfo uint64
LayoutCommit uint64
LayoutReturn uint64
SecinfoNoName uint64
TestStateId uint64
FreeStateId uint64
GetDeviceList uint64
BindConnToSession uint64
DestroyClientId uint64
Seek uint64
Allocate uint64
DeAllocate uint64
LayoutStats uint64
Clone uint64
}
// ServerV4Stats models the nfsd "proc4" line.
type ServerV4Stats struct {
Null uint64
Compound uint64
}
// V4Ops models the "proc4ops" line: NFSv4 operations
// Variable list, see:
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
type V4Ops struct {
//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
Op0Unused uint64
Op1Unused uint64
Op2Future uint64
Access uint64
Close uint64
Commit uint64
Create uint64
DelegPurge uint64
DelegReturn uint64
GetAttr uint64
GetFH uint64
Link uint64
Lock uint64
Lockt uint64
Locku uint64
Lookup uint64
LookupRoot uint64
Nverify uint64
Open uint64
OpenAttr uint64
OpenConfirm uint64
OpenDgrd uint64
PutFH uint64
PutPubFH uint64
PutRootFH uint64
Read uint64
ReadDir uint64
ReadLink uint64
Remove uint64
Rename uint64
Renew uint64
RestoreFH uint64
SaveFH uint64
SecInfo uint64
SetAttr uint64
Verify uint64
Write uint64
RelLockOwner uint64
}
// ClientRPCStats models all stats from /proc/net/rpc/nfs.
type ClientRPCStats struct {
Network Network
ClientRPC ClientRPC
V2Stats V2Stats
V3Stats V3Stats
ClientV4Stats ClientV4Stats
}
// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
type ServerRPCStats struct {
ReplyCache ReplyCache
FileHandles FileHandles
InputOutput InputOutput
Threads Threads
ReadAheadCache ReadAheadCache
Network Network
ServerRPC ServerRPC
V2Stats V2Stats
V3Stats V3Stats
ServerV4Stats ServerV4Stats
V4Ops V4Ops
}

317
vendor/github.com/prometheus/procfs/nfs/parse.go generated vendored Normal file
View File

@@ -0,0 +1,317 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"fmt"
)
func parseReplyCache(v []uint64) (ReplyCache, error) {
if len(v) != 3 {
return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
}
return ReplyCache{
Hits: v[0],
Misses: v[1],
NoCache: v[2],
}, nil
}
func parseFileHandles(v []uint64) (FileHandles, error) {
if len(v) != 5 {
return FileHandles{}, fmt.Errorf("invalid FileHandles line %q", v)
}
return FileHandles{
Stale: v[0],
TotalLookups: v[1],
AnonLookups: v[2],
DirNoCache: v[3],
NoDirNoCache: v[4],
}, nil
}
func parseInputOutput(v []uint64) (InputOutput, error) {
if len(v) != 2 {
return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
}
return InputOutput{
Read: v[0],
Write: v[1],
}, nil
}
func parseThreads(v []uint64) (Threads, error) {
if len(v) != 2 {
return Threads{}, fmt.Errorf("invalid Threads line %q", v)
}
return Threads{
Threads: v[0],
FullCnt: v[1],
}, nil
}
func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
if len(v) != 12 {
return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
}
return ReadAheadCache{
CacheSize: v[0],
CacheHistogram: v[1:11],
NotFound: v[11],
}, nil
}
func parseNetwork(v []uint64) (Network, error) {
if len(v) != 4 {
return Network{}, fmt.Errorf("invalid Network line %q", v)
}
return Network{
NetCount: v[0],
UDPCount: v[1],
TCPCount: v[2],
TCPConnect: v[3],
}, nil
}
func parseServerRPC(v []uint64) (ServerRPC, error) {
if len(v) != 5 {
return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
}
return ServerRPC{
RPCCount: v[0],
BadCnt: v[1],
BadFmt: v[2],
BadAuth: v[3],
BadcInt: v[4],
}, nil
}
func parseClientRPC(v []uint64) (ClientRPC, error) {
if len(v) != 3 {
return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
}
return ClientRPC{
RPCCount: v[0],
Retransmissions: v[1],
AuthRefreshes: v[2],
}, nil
}
func parseV2Stats(v []uint64) (V2Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 18 {
return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
}
return V2Stats{
Null: v[1],
GetAttr: v[2],
SetAttr: v[3],
Root: v[4],
Lookup: v[5],
ReadLink: v[6],
Read: v[7],
WrCache: v[8],
Write: v[9],
Create: v[10],
Remove: v[11],
Rename: v[12],
Link: v[13],
SymLink: v[14],
MkDir: v[15],
RmDir: v[16],
ReadDir: v[17],
FsStat: v[18],
}, nil
}
func parseV3Stats(v []uint64) (V3Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 22 {
return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
}
return V3Stats{
Null: v[1],
GetAttr: v[2],
SetAttr: v[3],
Lookup: v[4],
Access: v[5],
ReadLink: v[6],
Read: v[7],
Write: v[8],
Create: v[9],
MkDir: v[10],
SymLink: v[11],
MkNod: v[12],
Remove: v[13],
RmDir: v[14],
Rename: v[15],
Link: v[16],
ReadDir: v[17],
ReadDirPlus: v[18],
FsStat: v[19],
FsInfo: v[20],
PathConf: v[21],
Commit: v[22],
}, nil
}
func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
values := int(v[0])
if len(v[1:]) != values {
return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
}
// This function currently supports mapping 59 NFS v4 client stats. Older
// kernels may emit fewer stats, so we must detect this and pad out the
// values to match the expected slice size.
if values < 59 {
newValues := make([]uint64, 60)
copy(newValues, v)
v = newValues
}
return ClientV4Stats{
Null: v[1],
Read: v[2],
Write: v[3],
Commit: v[4],
Open: v[5],
OpenConfirm: v[6],
OpenNoattr: v[7],
OpenDowngrade: v[8],
Close: v[9],
Setattr: v[10],
FsInfo: v[11],
Renew: v[12],
SetClientId: v[13],
SetClientIdConfirm: v[14],
Lock: v[15],
Lockt: v[16],
Locku: v[17],
Access: v[18],
Getattr: v[19],
Lookup: v[20],
LookupRoot: v[21],
Remove: v[22],
Rename: v[23],
Link: v[24],
Symlink: v[25],
Create: v[26],
Pathconf: v[27],
StatFs: v[28],
ReadLink: v[29],
ReadDir: v[30],
ServerCaps: v[31],
DelegReturn: v[32],
GetAcl: v[33],
SetAcl: v[34],
FsLocations: v[35],
ReleaseLockowner: v[36],
Secinfo: v[37],
FsidPresent: v[38],
ExchangeId: v[39],
CreateSession: v[40],
DestroySession: v[41],
Sequence: v[42],
GetLeaseTime: v[43],
ReclaimComplete: v[44],
LayoutGet: v[45],
GetDeviceInfo: v[46],
LayoutCommit: v[47],
LayoutReturn: v[48],
SecinfoNoName: v[49],
TestStateId: v[50],
FreeStateId: v[51],
GetDeviceList: v[52],
BindConnToSession: v[53],
DestroyClientId: v[54],
Seek: v[55],
Allocate: v[56],
DeAllocate: v[57],
LayoutStats: v[58],
Clone: v[59],
}, nil
}
func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
values := int(v[0])
if len(v[1:]) != values || values != 2 {
return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
}
return ServerV4Stats{
Null: v[1],
Compound: v[2],
}, nil
}
func parseV4Ops(v []uint64) (V4Ops, error) {
values := int(v[0])
if len(v[1:]) != values || values < 39 {
return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
}
stats := V4Ops{
Op0Unused: v[1],
Op1Unused: v[2],
Op2Future: v[3],
Access: v[4],
Close: v[5],
Commit: v[6],
Create: v[7],
DelegPurge: v[8],
DelegReturn: v[9],
GetAttr: v[10],
GetFH: v[11],
Link: v[12],
Lock: v[13],
Lockt: v[14],
Locku: v[15],
Lookup: v[16],
LookupRoot: v[17],
Nverify: v[18],
Open: v[19],
OpenAttr: v[20],
OpenConfirm: v[21],
OpenDgrd: v[22],
PutFH: v[23],
PutPubFH: v[24],
PutRootFH: v[25],
Read: v[26],
ReadDir: v[27],
ReadLink: v[28],
Remove: v[29],
Rename: v[30],
Renew: v[31],
RestoreFH: v[32],
SaveFH: v[33],
SecInfo: v[34],
SetAttr: v[35],
Verify: v[36],
Write: v[37],
RelLockOwner: v[38],
}
return stats, nil
}
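
A minimal, hypothetical sketch (not part of the vendored file; it would have to live inside the nfs package because these helpers are unexported) showing the two parsing conventions above: fixed-length lines such as "rc" pass their raw counters, while "procN" lines carry a leading element count that the parsers validate before indexing.
package nfs
import "fmt"
func exampleParseHelpers() error {
	// "rc" line: exactly three counters (hits, misses, nocache); values are illustrative.
	rc, err := parseReplyCache([]uint64{0, 6, 18622})
	if err != nil {
		return err
	}
	// "proc2" line: the first element (18) announces how many counters follow.
	v2 := append([]uint64{18}, make([]uint64, 18)...)
	v2[1] = 27 // Null calls, illustrative value only
	stats, err := parseV2Stats(v2)
	if err != nil {
		return err
	}
	fmt.Println(rc.Hits, stats.Null)
	return nil
}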

67
vendor/github.com/prometheus/procfs/nfs/parse_nfs.go generated vendored Normal file
View File

@@ -0,0 +1,67 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
stats := &ClientRPCStats{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
// require at least <key> <value>
if len(parts) < 2 {
return nil, fmt.Errorf("invalid NFS metric line %q", line)
}
values, err := util.ParseUint64s(parts[1:])
if err != nil {
return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
}
switch metricLine := parts[0]; metricLine {
case "net":
stats.Network, err = parseNetwork(values)
case "rpc":
stats.ClientRPC, err = parseClientRPC(values)
case "proc2":
stats.V2Stats, err = parseV2Stats(values)
case "proc3":
stats.V3Stats, err = parseV3Stats(values)
case "proc4":
stats.ClientV4Stats, err = parseClientV4Stats(values)
default:
return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
}
if err != nil {
return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning NFS file: %s", err)
}
return stats, nil
}
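
For illustration only (not part of this change), a usage sketch of the exported ParseClientRPCStats above, fed a trimmed /proc/net/rpc/nfs excerpt containing only "net" and "rpc" lines; the numbers are made up.
package main
import (
	"fmt"
	"log"
	"strings"
	"github.com/prometheus/procfs/nfs"
)
func main() {
	input := "net 18628 0 18628 6\nrpc 4329785 0 4338291\n"
	stats, err := nfs.ParseClientRPCStats(strings.NewReader(input))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats.Network.TCPCount, stats.ClientRPC.Retransmissions)
}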

89
vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go generated vendored Normal file
View File

@@ -0,0 +1,89 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
stats := &ServerRPCStats{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
// require at least <key> <value>
if len(parts) < 2 {
return nil, fmt.Errorf("invalid NFSd metric line %q", line)
}
label := parts[0]
var values []uint64
var err error
if label == "th" {
if len(parts) < 3 {
return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
}
values, err = util.ParseUint64s(parts[1:3])
} else {
values, err = util.ParseUint64s(parts[1:])
}
if err != nil {
return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
}
switch metricLine := parts[0]; metricLine {
case "rc":
stats.ReplyCache, err = parseReplyCache(values)
case "fh":
stats.FileHandles, err = parseFileHandles(values)
case "io":
stats.InputOutput, err = parseInputOutput(values)
case "th":
stats.Threads, err = parseThreads(values)
case "ra":
stats.ReadAheadCache, err = parseReadAheadCache(values)
case "net":
stats.Network, err = parseNetwork(values)
case "rpc":
stats.ServerRPC, err = parseServerRPC(values)
case "proc2":
stats.V2Stats, err = parseV2Stats(values)
case "proc3":
stats.V3Stats, err = parseV3Stats(values)
case "proc4":
stats.ServerV4Stats, err = parseServerV4Stats(values)
case "proc4ops":
stats.V4Ops, err = parseV4Ops(values)
default:
return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
}
if err != nil {
return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning NFSd file: %s", err)
}
return stats, nil
}
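
A corresponding sketch for the server side (illustration only): ParseServerRPCStats is fed a trimmed /proc/net/rpc/nfsd excerpt; the "th" line keeps just the two integer fields that the parser consumes.
package main
import (
	"fmt"
	"log"
	"strings"
	"github.com/prometheus/procfs/nfs"
)
func main() {
	input := "rc 0 6 18622\nio 157286400 0\nth 8 0\n"
	stats, err := nfs.ParseServerRPCStats(strings.NewReader(input))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats.ReplyCache.NoCache, stats.InputOutput.Read, stats.Threads.Threads)
}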

View File

@@ -1,6 +1,20 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bytes"
"fmt"
"io/ioutil"
"os"
@@ -113,7 +127,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
}
// Comm returns the command name of a process.
@@ -192,6 +206,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
return len(fds), nil
}
// MountStats retrieves statistics and configuration for mount points in a
// process's namespace.
func (p Proc) MountStats() ([]*Mount, error) {
f, err := os.Open(p.path("mountstats"))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountStats(f)
}
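
A hedged usage sketch of the new MountStats accessor; procfs.Self and the Mount type come from elsewhere in the package, not from this hunk.
package main
import (
	"fmt"
	"log"
	"github.com/prometheus/procfs"
)
func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	// Each entry describes one mount in the process's namespace.
	fmt.Printf("found %d mounts\n", len(mounts))
}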
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {

View File

@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
@@ -47,9 +60,6 @@ func (p Proc) NewIO() (ProcIO, error) {
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
if err != nil {
return pio, err
}
return pio, nil
return pio, err
}
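
A hedged usage sketch of NewIO; procfs.Self and the ProcIO field names are taken from the surrounding file, not this hunk.
package main
import (
	"fmt"
	"log"
	"github.com/prometheus/procfs"
)
func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	pio, err := p.NewIO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pio.ReadBytes, pio.WriteBytes)
}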

View File

@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
@@ -13,46 +26,46 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
CPUTime int
CPUTime int64
// Maximum size of files that the process may create.
FileSize int
FileSize int64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
DataSize int
DataSize int64
// Maximum size of the process stack in bytes.
StackSize int
StackSize int64
// Maximum size of a core file.
CoreFileSize int
CoreFileSize int64
// Limit of the process's resident set in pages.
ResidentSet int
ResidentSet int64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
Processes int
Processes int64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
OpenFiles int
OpenFiles int64
// Maximum number of bytes of memory that may be locked into RAM.
LockedMemory int
LockedMemory int64
// Maximum size of the process's virtual memory address space in bytes.
AddressSpace int
AddressSpace int64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
FileLocks int
FileLocks int64
// Limit of signals that may be queued for the real user ID of the calling
// process.
PendingSignals int
PendingSignals int64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
MsqqueueSize int
MsqqueueSize int64
// Limit of the nice priority set using setpriority(2) or nice(2).
NicePriority int
NicePriority int64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
RealtimePriority int
RealtimePriority int64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
RealtimeTimeout int
RealtimeTimeout int64
}
const (
@@ -125,13 +138,13 @@ func (p Proc) NewLimits() (ProcLimits, error) {
return l, s.Err()
}
func parseInt(s string) (int, error) {
func parseInt(s string) (int64, error) {
if s == limitsUnlimited {
return -1, nil
}
i, err := strconv.ParseInt(s, 10, 32)
i, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
}
return int(i), nil
return i, nil
}
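
A hedged sketch exercising the widened int64 limits; procfs.Self is assumed, and -1 still marks "unlimited".
package main
import (
	"fmt"
	"log"
	"github.com/prometheus/procfs"
)
func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.NewLimits()
	if err != nil {
		log.Fatal(err)
	}
	// AddressSpace can exceed 32 bits on 64-bit hosts, hence the int64 fields.
	fmt.Println(limits.OpenFiles, limits.AddressSpace)
}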

68
vendor/github.com/prometheus/procfs/proc_ns.go generated vendored Normal file
View File

@@ -0,0 +1,68 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"os"
"strconv"
"strings"
)
// Namespace represents a single namespace of a process.
type Namespace struct {
Type string // Namespace type.
Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
}
// Namespaces contains all of the namespaces that the process is contained in.
type Namespaces map[string]Namespace
// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
// process is a member.
func (p Proc) NewNamespaces() (Namespaces, error) {
d, err := os.Open(p.path("ns"))
if err != nil {
return nil, err
}
defer d.Close()
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
}
ns := make(Namespaces, len(names))
for _, name := range names {
target, err := os.Readlink(p.path("ns", name))
if err != nil {
return nil, err
}
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}
}
return ns, nil
}
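
A hedged usage sketch of NewNamespaces; procfs.Self is assumed, and the output depends on the host.
package main
import (
	"fmt"
	"log"
	"github.com/prometheus/procfs"
)
func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ns, err := p.NewNamespaces()
	if err != nil {
		log.Fatal(err)
	}
	// Two processes share a namespace when the inode numbers match.
	for typ, n := range ns {
		fmt.Printf("%s: %d\n", typ, n.Inode)
	}
}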

View File

@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (

View File

@@ -1,17 +1,81 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// CPUStat shows how much time the CPU spent in various stages.
type CPUStat struct {
User float64
Nice float64
System float64
Idle float64
Iowait float64
IRQ float64
SoftIRQ float64
Steal float64
Guest float64
GuestNice float64
}
// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
// It is possible to get per-cpu stats by reading /proc/softirqs
type SoftIRQStat struct {
Hi uint64
Timer uint64
NetTx uint64
NetRx uint64
Block uint64
BlockIoPoll uint64
Tasklet uint64
Sched uint64
Hrtimer uint64
Rcu uint64
}
// Stat represents kernel/system statistics.
type Stat struct {
// Boot time in seconds since the Epoch.
BootTime int64
BootTime uint64
// Summed up cpu statistics.
CPUTotal CPUStat
// Per-CPU statistics.
CPU []CPUStat
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
IRQTotal uint64
// Number of times a numbered IRQ was triggered.
IRQ []uint64
// Number of times a context switch happened.
ContextSwitches uint64
// Number of times a process was created.
ProcessCreated uint64
// Number of processes currently running.
ProcessesRunning uint64
// Number of processes currently blocked (waiting for IO).
ProcessesBlocked uint64
// Number of times a softirq was scheduled.
SoftIRQTotal uint64
// Detailed softirq statistics.
SoftIRQ SoftIRQStat
}
// NewStat returns kernel/system statistics read from /proc/stat.
@@ -24,33 +88,145 @@ func NewStat() (Stat, error) {
return fs.NewStat()
}
// parseCPUStat parses a CPU statistics line and returns the CPUStat struct plus the CPU id (or -1 for the overall sum).
func parseCPUStat(line string) (CPUStat, int64, error) {
cpuStat := CPUStat{}
var cpu string
count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
&cpu,
&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
}
if count == 0 {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
}
cpuStat.User /= userHZ
cpuStat.Nice /= userHZ
cpuStat.System /= userHZ
cpuStat.Idle /= userHZ
cpuStat.Iowait /= userHZ
cpuStat.IRQ /= userHZ
cpuStat.SoftIRQ /= userHZ
cpuStat.Steal /= userHZ
cpuStat.Guest /= userHZ
cpuStat.GuestNice /= userHZ
if cpu == "cpu" {
return cpuStat, -1, nil
}
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
}
return cpuStat, cpuID, nil
}
// parseSoftIRQStat parses a softirq line.
func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
softIRQStat := SoftIRQStat{}
var total uint64
var prefix string
_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
&prefix, &total,
&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
&softIRQStat.Block, &softIRQStat.BlockIoPoll,
&softIRQStat.Tasklet, &softIRQStat.Sched,
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
}
return softIRQStat, total, nil
}
// NewStat returns information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
f, err := os.Open(fs.Path("stat"))
if err != nil {
return Stat{}, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
line := s.Text()
if !strings.HasPrefix(line, "btime") {
stat := Stat{}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
// require at least <key> <value>
if len(parts) < 2 {
continue
}
fields := strings.Fields(line)
if len(fields) != 2 {
return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
if err != nil {
return Stat{}, err
}
stat.SoftIRQTotal = total
stat.SoftIRQ = softIRQStats
case strings.HasPrefix(parts[0], "cpu"):
cpuStat, cpuID, err := parseCPUStat(line)
if err != nil {
return Stat{}, err
}
if cpuID == -1 {
stat.CPUTotal = cpuStat
} else {
for int64(len(stat.CPU)) <= cpuID {
stat.CPU = append(stat.CPU, CPUStat{})
}
stat.CPU[cpuID] = cpuStat
}
}
i, err := strconv.ParseInt(fields[1], 10, 32)
if err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
}
return Stat{BootTime: i}, nil
}
if err := s.Err(); err != nil {
if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
}
return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
return stat, nil
}
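
A hedged usage sketch of the reworked stat parser, using the package-level NewStat shown above; all values depend on the host.
package main
import (
	"fmt"
	"log"
	"github.com/prometheus/procfs"
)
func main() {
	stat, err := procfs.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("boot time:", stat.BootTime)
	fmt.Println("cpus:", len(stat.CPU))
	fmt.Println("context switches:", stat.ContextSwitches)
}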

187
vendor/github.com/prometheus/procfs/xfrm.go generated vendored Normal file
View File

@@ -0,0 +1,187 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// XfrmStat models the contents of /proc/net/xfrm_stat.
type XfrmStat struct {
// All errors which are not matched by others
XfrmInError int
// No buffer is left
XfrmInBufferError int
// Header Error
XfrmInHdrError int
// No state found
// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
XfrmInNoStates int
// Transformation protocol specific error
// e.g. SA Key is wrong
XfrmInStateProtoError int
// Transformation mode specific error
XfrmInStateModeError int
// Sequence error
// e.g. sequence number is out of window
XfrmInStateSeqError int
// State is expired
XfrmInStateExpired int
// State has mismatch option
// e.g. UDP encapsulation type is mismatched
XfrmInStateMismatch int
// State is invalid
XfrmInStateInvalid int
// No matching template for states
// e.g. Inbound SAs are correct but SP rule is wrong
XfrmInTmplMismatch int
// No policy is found for states
// e.g. Inbound SAs are correct but no SP is found
XfrmInNoPols int
// Policy discards
XfrmInPolBlock int
// Policy error
XfrmInPolError int
// All errors which are not matched by others
XfrmOutError int
// Bundle generation error
XfrmOutBundleGenError int
// Bundle check error
XfrmOutBundleCheckError int
// No state was found
XfrmOutNoStates int
// Transformation protocol specific error
XfrmOutStateProtoError int
// Transformation mode specific error
XfrmOutStateModeError int
// Sequence error
// i.e sequence number overflow
XfrmOutStateSeqError int
// State is expired
XfrmOutStateExpired int
// Policy discards
XfrmOutPolBlock int
// Policy is dead
XfrmOutPolDead int
// Policy Error
XfrmOutPolError int
XfrmFwdHdrError int
XfrmOutStateInvalid int
XfrmAcquireError int
}
// NewXfrmStat reads the xfrm_stat statistics.
func NewXfrmStat() (XfrmStat, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return XfrmStat{}, err
}
return fs.NewXfrmStat()
}
// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
func (fs FS) NewXfrmStat() (XfrmStat, error) {
file, err := os.Open(fs.Path("net/xfrm_stat"))
if err != nil {
return XfrmStat{}, err
}
defer file.Close()
var (
x = XfrmStat{}
s = bufio.NewScanner(file)
)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf(
"couldnt parse %s line %s", file.Name(), s.Text())
}
name := fields[0]
value, err := strconv.Atoi(fields[1])
if err != nil {
return XfrmStat{}, err
}
switch name {
case "XfrmInError":
x.XfrmInError = value
case "XfrmInBufferError":
x.XfrmInBufferError = value
case "XfrmInHdrError":
x.XfrmInHdrError = value
case "XfrmInNoStates":
x.XfrmInNoStates = value
case "XfrmInStateProtoError":
x.XfrmInStateProtoError = value
case "XfrmInStateModeError":
x.XfrmInStateModeError = value
case "XfrmInStateSeqError":
x.XfrmInStateSeqError = value
case "XfrmInStateExpired":
x.XfrmInStateExpired = value
case "XfrmInStateInvalid":
x.XfrmInStateInvalid = value
case "XfrmInTmplMismatch":
x.XfrmInTmplMismatch = value
case "XfrmInNoPols":
x.XfrmInNoPols = value
case "XfrmInPolBlock":
x.XfrmInPolBlock = value
case "XfrmInPolError":
x.XfrmInPolError = value
case "XfrmOutError":
x.XfrmOutError = value
case "XfrmInStateMismatch":
x.XfrmInStateMismatch = value
case "XfrmOutBundleGenError":
x.XfrmOutBundleGenError = value
case "XfrmOutBundleCheckError":
x.XfrmOutBundleCheckError = value
case "XfrmOutNoStates":
x.XfrmOutNoStates = value
case "XfrmOutStateProtoError":
x.XfrmOutStateProtoError = value
case "XfrmOutStateModeError":
x.XfrmOutStateModeError = value
case "XfrmOutStateSeqError":
x.XfrmOutStateSeqError = value
case "XfrmOutStateExpired":
x.XfrmOutStateExpired = value
case "XfrmOutPolBlock":
x.XfrmOutPolBlock = value
case "XfrmOutPolDead":
x.XfrmOutPolDead = value
case "XfrmOutPolError":
x.XfrmOutPolError = value
case "XfrmFwdHdrError":
x.XfrmFwdHdrError = value
case "XfrmOutStateInvalid":
x.XfrmOutStateInvalid = value
case "XfrmAcquireError":
x.XfrmAcquireError = value
}
}
return x, s.Err()
}
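
A hedged usage sketch of NewXfrmStat; the counters printed here are defined in the struct above and depend on the host.
package main
import (
	"fmt"
	"log"
	"github.com/prometheus/procfs"
)
func main() {
	x, err := procfs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(x.XfrmInError, x.XfrmOutError)
}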

330
vendor/github.com/prometheus/procfs/xfs/parse.go generated vendored Normal file
View File

@@ -0,0 +1,330 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xfs
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// ParseStats parses a Stats from an input io.Reader, using the format
// found in /proc/fs/xfs/stat.
func ParseStats(r io.Reader) (*Stats, error) {
const (
// Fields parsed into stats structures.
fieldExtentAlloc = "extent_alloc"
fieldAbt = "abt"
fieldBlkMap = "blk_map"
fieldBmbt = "bmbt"
fieldDir = "dir"
fieldTrans = "trans"
fieldIg = "ig"
fieldLog = "log"
fieldRw = "rw"
fieldAttr = "attr"
fieldIcluster = "icluster"
fieldVnodes = "vnodes"
fieldBuf = "buf"
fieldXpc = "xpc"
// Unimplemented at this time due to lack of documentation.
fieldPushAil = "push_ail"
fieldXstrat = "xstrat"
fieldAbtb2 = "abtb2"
fieldAbtc2 = "abtc2"
fieldBmbt2 = "bmbt2"
fieldIbt2 = "ibt2"
fieldFibt2 = "fibt2"
fieldQm = "qm"
fieldDebug = "debug"
)
var xfss Stats
s := bufio.NewScanner(r)
for s.Scan() {
// Expect at least a string label and a single integer value, ex:
// - abt 0
// - rw 1 2
ss := strings.Fields(string(s.Bytes()))
if len(ss) < 2 {
continue
}
label := ss[0]
// Extended precision counters are uint64 values.
if label == fieldXpc {
us, err := util.ParseUint64s(ss[1:])
if err != nil {
return nil, err
}
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
if err != nil {
return nil, err
}
continue
}
// All other counters are uint32 values.
us, err := util.ParseUint32s(ss[1:])
if err != nil {
return nil, err
}
switch label {
case fieldExtentAlloc:
xfss.ExtentAllocation, err = extentAllocationStats(us)
case fieldAbt:
xfss.AllocationBTree, err = btreeStats(us)
case fieldBlkMap:
xfss.BlockMapping, err = blockMappingStats(us)
case fieldBmbt:
xfss.BlockMapBTree, err = btreeStats(us)
case fieldDir:
xfss.DirectoryOperation, err = directoryOperationStats(us)
case fieldTrans:
xfss.Transaction, err = transactionStats(us)
case fieldIg:
xfss.InodeOperation, err = inodeOperationStats(us)
case fieldLog:
xfss.LogOperation, err = logOperationStats(us)
case fieldRw:
xfss.ReadWrite, err = readWriteStats(us)
case fieldAttr:
xfss.AttributeOperation, err = attributeOperationStats(us)
case fieldIcluster:
xfss.InodeClustering, err = inodeClusteringStats(us)
case fieldVnodes:
xfss.Vnode, err = vnodeStats(us)
case fieldBuf:
xfss.Buffer, err = bufferStats(us)
}
if err != nil {
return nil, err
}
}
return &xfss, s.Err()
}
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
if l := len(us); l != 4 {
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
}
return ExtentAllocationStats{
ExtentsAllocated: us[0],
BlocksAllocated: us[1],
ExtentsFreed: us[2],
BlocksFreed: us[3],
}, nil
}
// btreeStats builds a BTreeStats from a slice of uint32s.
func btreeStats(us []uint32) (BTreeStats, error) {
if l := len(us); l != 4 {
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
}
return BTreeStats{
Lookups: us[0],
Compares: us[1],
RecordsInserted: us[2],
RecordsDeleted: us[3],
}, nil
}
// BlockMappingStats builds a BlockMappingStats from a slice of uint32s.
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
if l := len(us); l != 7 {
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
}
return BlockMappingStats{
Reads: us[0],
Writes: us[1],
Unmaps: us[2],
ExtentListInsertions: us[3],
ExtentListDeletions: us[4],
ExtentListLookups: us[5],
ExtentListCompares: us[6],
}, nil
}
// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
if l := len(us); l != 4 {
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
}
return DirectoryOperationStats{
Lookups: us[0],
Creates: us[1],
Removes: us[2],
Getdents: us[3],
}, nil
}
// TransactionStats builds a TransactionStats from a slice of uint32s.
func transactionStats(us []uint32) (TransactionStats, error) {
if l := len(us); l != 3 {
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
}
return TransactionStats{
Sync: us[0],
Async: us[1],
Empty: us[2],
}, nil
}
// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
if l := len(us); l != 7 {
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
}
return InodeOperationStats{
Attempts: us[0],
Found: us[1],
Recycle: us[2],
Missed: us[3],
Duplicate: us[4],
Reclaims: us[5],
AttributeChange: us[6],
}, nil
}
// LogOperationStats builds a LogOperationStats from a slice of uint32s.
func logOperationStats(us []uint32) (LogOperationStats, error) {
if l := len(us); l != 5 {
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
}
return LogOperationStats{
Writes: us[0],
Blocks: us[1],
NoInternalBuffers: us[2],
Force: us[3],
ForceSleep: us[4],
}, nil
}
// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
func readWriteStats(us []uint32) (ReadWriteStats, error) {
if l := len(us); l != 2 {
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
}
return ReadWriteStats{
Read: us[0],
Write: us[1],
}, nil
}
// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
if l := len(us); l != 4 {
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
}
return AttributeOperationStats{
Get: us[0],
Set: us[1],
Remove: us[2],
List: us[3],
}, nil
}
// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
if l := len(us); l != 3 {
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
}
return InodeClusteringStats{
Iflush: us[0],
Flush: us[1],
FlushInode: us[2],
}, nil
}
// VnodeStats builds a VnodeStats from a slice of uint32s.
func vnodeStats(us []uint32) (VnodeStats, error) {
// The attribute "Free" appears to not be available on older XFS
// stats versions. Therefore, 7 or 8 elements may appear in
// this slice.
l := len(us)
if l != 7 && l != 8 {
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
}
s := VnodeStats{
Active: us[0],
Allocate: us[1],
Get: us[2],
Hold: us[3],
Release: us[4],
Reclaim: us[5],
Remove: us[6],
}
// Skip adding free, unless it is present. The zero value will
// be used in place of an actual count.
if l == 7 {
return s, nil
}
s.Free = us[7]
return s, nil
}
// BufferStats builds a BufferStats from a slice of uint32s.
func bufferStats(us []uint32) (BufferStats, error) {
if l := len(us); l != 9 {
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
}
return BufferStats{
Get: us[0],
Create: us[1],
GetLocked: us[2],
GetLockedWaited: us[3],
BusyLocked: us[4],
MissLocked: us[5],
PageRetries: us[6],
PageFound: us[7],
GetRead: us[8],
}, nil
}
// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
if l := len(us); l != 3 {
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
}
return ExtendedPrecisionStats{
FlushBytes: us[0],
WriteBytes: us[1],
ReadBytes: us[2],
}, nil
}
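
For illustration, a usage sketch feeding ParseStats a trimmed /proc/fs/xfs/stat excerpt; unknown labels are skipped, known labels must carry the expected field counts, and the numbers are made up.
package main
import (
	"fmt"
	"log"
	"strings"
	"github.com/prometheus/procfs/xfs"
)
func main() {
	input := "extent_alloc 92447 97589 92448 93751\nrw 1 2\nxpc 399724544 92823103 86219234\n"
	stats, err := xfs.ParseStats(strings.NewReader(input))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats.ExtentAllocation.ExtentsAllocated, stats.ReadWrite.Write, stats.ExtendedPrecision.WriteBytes)
}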

163
vendor/github.com/prometheus/procfs/xfs/xfs.go generated vendored Normal file
View File

@@ -0,0 +1,163 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package xfs provides access to statistics exposed by the XFS filesystem.
package xfs
// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
// The name of the filesystem used to source these statistics.
// If empty, this indicates aggregated statistics for all XFS
// filesystems on the host.
Name string
ExtentAllocation ExtentAllocationStats
AllocationBTree BTreeStats
BlockMapping BlockMappingStats
BlockMapBTree BTreeStats
DirectoryOperation DirectoryOperationStats
Transaction TransactionStats
InodeOperation InodeOperationStats
LogOperation LogOperationStats
ReadWrite ReadWriteStats
AttributeOperation AttributeOperationStats
InodeClustering InodeClusteringStats
Vnode VnodeStats
Buffer BufferStats
ExtendedPrecision ExtendedPrecisionStats
}
// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
ExtentsAllocated uint32
BlocksAllocated uint32
ExtentsFreed uint32
BlocksFreed uint32
}
// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
Lookups uint32
Compares uint32
RecordsInserted uint32
RecordsDeleted uint32
}
// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
Reads uint32
Writes uint32
Unmaps uint32
ExtentListInsertions uint32
ExtentListDeletions uint32
ExtentListLookups uint32
ExtentListCompares uint32
}
// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
Lookups uint32
Creates uint32
Removes uint32
Getdents uint32
}
// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
Sync uint32
Async uint32
Empty uint32
}
// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
Attempts uint32
Found uint32
Recycle uint32
Missed uint32
Duplicate uint32
Reclaims uint32
AttributeChange uint32
}
// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
Writes uint32
Blocks uint32
NoInternalBuffers uint32
Force uint32
ForceSleep uint32
}
// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
Read uint32
Write uint32
}
// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
Get uint32
Set uint32
Remove uint32
List uint32
}
// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
Iflush uint32
Flush uint32
FlushInode uint32
}
// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
Active uint32
Allocate uint32
Get uint32
Hold uint32
Release uint32
Reclaim uint32
Remove uint32
Free uint32
}
// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
Get uint32
Create uint32
GetLocked uint32
GetLockedWaited uint32
BusyLocked uint32
MissLocked uint32
PageRetries uint32
PageFound uint32
GetRead uint32
}
// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
type ExtendedPrecisionStats struct {
FlushBytes uint64
WriteBytes uint64
ReadBytes uint64
}