Update swagger to 0.20.1 (#8010)
* update swagger to 0.20.1 * fiw swagger version for validatetokarchuk/v1.17
parent
e0f95d1545
commit
256b178176
@ -1,15 +0,0 @@ |
||||
# This is the official list of cloud authors for copyright purposes. |
||||
# This file is distinct from the CONTRIBUTORS files. |
||||
# See the latter for an explanation. |
||||
|
||||
# Names should be added to this file as: |
||||
# Name or Organization <email address> |
||||
# The email address is not required for organizations. |
||||
|
||||
Filippo Valsorda <hi@filippo.io> |
||||
Google Inc. |
||||
Ingo Oeser <nightlyone@googlemail.com> |
||||
Palm Stone Games, Inc. |
||||
Paweł Knap <pawelknap88@gmail.com> |
||||
Péter Szilágyi <peterke@gmail.com> |
||||
Tyler Treat <ttreat31@gmail.com> |
@ -1,40 +0,0 @@ |
||||
# People who have agreed to one of the CLAs and can contribute patches. |
||||
# The AUTHORS file lists the copyright holders; this file |
||||
# lists people. For example, Google employees are listed here |
||||
# but not in AUTHORS, because Google holds the copyright. |
||||
# |
||||
# https://developers.google.com/open-source/cla/individual |
||||
# https://developers.google.com/open-source/cla/corporate |
||||
# |
||||
# Names should be added to this file as: |
||||
# Name <email address> |
||||
|
||||
# Keep the list alphabetically sorted. |
||||
|
||||
Alexis Hunt <lexer@google.com> |
||||
Andreas Litt <andreas.litt@gmail.com> |
||||
Andrew Gerrand <adg@golang.org> |
||||
Brad Fitzpatrick <bradfitz@golang.org> |
||||
Burcu Dogan <jbd@google.com> |
||||
Dave Day <djd@golang.org> |
||||
David Sansome <me@davidsansome.com> |
||||
David Symonds <dsymonds@golang.org> |
||||
Filippo Valsorda <hi@filippo.io> |
||||
Glenn Lewis <gmlewis@google.com> |
||||
Ingo Oeser <nightlyone@googlemail.com> |
||||
James Hall <james.hall@shopify.com> |
||||
Johan Euphrosine <proppy@google.com> |
||||
Jonathan Amsterdam <jba@google.com> |
||||
Kunpei Sakai <namusyaka@gmail.com> |
||||
Luna Duclos <luna.duclos@palmstonegames.com> |
||||
Magnus Hiie <magnus.hiie@gmail.com> |
||||
Mario Castro <mariocaster@gmail.com> |
||||
Michael McGreevy <mcgreevy@golang.org> |
||||
Omar Jarjur <ojarjur@google.com> |
||||
Paweł Knap <pawelknap88@gmail.com> |
||||
Péter Szilágyi <peterke@gmail.com> |
||||
Sarah Adams <shadams@google.com> |
||||
Thanatat Tamtan <acoshift@gmail.com> |
||||
Toby Burress <kurin@google.com> |
||||
Tuo Shan <shantuo@google.com> |
||||
Tyler Treat <ttreat31@gmail.com> |
@ -0,0 +1 @@ |
||||
module github.com/golang/snappy |
@ -0,0 +1,29 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.12
|
||||
|
||||
package prometheus |
||||
|
||||
import "runtime/debug" |
||||
|
||||
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
|
||||
func readBuildInfo() (path, version, sum string) { |
||||
path, version, sum = "unknown", "unknown", "unknown" |
||||
if bi, ok := debug.ReadBuildInfo(); ok { |
||||
path = bi.Main.Path |
||||
version = bi.Main.Version |
||||
sum = bi.Main.Sum |
||||
} |
||||
return |
||||
} |
@ -0,0 +1,22 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.12
|
||||
|
||||
package prometheus |
||||
|
||||
// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
|
||||
// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
|
||||
func readBuildInfo() (path, version, sum string) { |
||||
return "unknown", "unknown", "unknown" |
||||
} |
@ -1,505 +0,0 @@ |
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus |
||||
|
||||
import ( |
||||
"bufio" |
||||
"compress/gzip" |
||||
"io" |
||||
"net" |
||||
"net/http" |
||||
"strconv" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/prometheus/common/expfmt" |
||||
) |
||||
|
||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
||||
// related should live. The functions here are just for avoiding
|
||||
// breakage. Everything is deprecated.
|
||||
|
||||
const ( |
||||
contentTypeHeader = "Content-Type" |
||||
contentEncodingHeader = "Content-Encoding" |
||||
acceptEncodingHeader = "Accept-Encoding" |
||||
) |
||||
|
||||
var gzipPool = sync.Pool{ |
||||
New: func() interface{} { |
||||
return gzip.NewWriter(nil) |
||||
}, |
||||
} |
||||
|
||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||
// name).
|
||||
//
|
||||
// Deprecated: Please note the issues described in the doc comment of
|
||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
|
||||
func Handler() http.Handler { |
||||
return InstrumentHandler("prometheus", UninstrumentedHandler()) |
||||
} |
||||
|
||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||
//
|
||||
// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
|
||||
// instead. See there for further documentation.
|
||||
func UninstrumentedHandler() http.Handler { |
||||
return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { |
||||
mfs, err := DefaultGatherer.Gather() |
||||
if err != nil { |
||||
httpError(rsp, err) |
||||
return |
||||
} |
||||
|
||||
contentType := expfmt.Negotiate(req.Header) |
||||
header := rsp.Header() |
||||
header.Set(contentTypeHeader, string(contentType)) |
||||
|
||||
w := io.Writer(rsp) |
||||
if gzipAccepted(req.Header) { |
||||
header.Set(contentEncodingHeader, "gzip") |
||||
gz := gzipPool.Get().(*gzip.Writer) |
||||
defer gzipPool.Put(gz) |
||||
|
||||
gz.Reset(w) |
||||
defer gz.Close() |
||||
|
||||
w = gz |
||||
} |
||||
|
||||
enc := expfmt.NewEncoder(w, contentType) |
||||
|
||||
for _, mf := range mfs { |
||||
if err := enc.Encode(mf); err != nil { |
||||
httpError(rsp, err) |
||||
return |
||||
} |
||||
} |
||||
}) |
||||
} |
||||
|
||||
var instLabels = []string{"method", "code"} |
||||
|
||||
type nower interface { |
||||
Now() time.Time |
||||
} |
||||
|
||||
type nowFunc func() time.Time |
||||
|
||||
func (n nowFunc) Now() time.Time { |
||||
return n() |
||||
} |
||||
|
||||
var now nower = nowFunc(func() time.Time { |
||||
return time.Now() |
||||
}) |
||||
|
||||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
||||
// registers four metric collectors (if not already done) and reports HTTP
|
||||
// metrics to the (newly or already) registered collectors: http_requests_total
|
||||
// (CounterVec), http_request_duration_microseconds (Summary),
|
||||
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
|
||||
// has a constant label named "handler" with the provided handlerName as
|
||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||
// (label name "method") and HTTP status code (label name "code").
|
||||
//
|
||||
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
|
||||
// package promhttp instead. The issues are the following: (1) It uses Summaries
|
||||
// rather than Histograms. Summaries are not useful if aggregation across
|
||||
// multiple instances is required. (2) It uses microseconds as unit, which is
|
||||
// deprecated and should be replaced by seconds. (3) The size of the request is
|
||||
// calculated in a separate goroutine. Since this calculator requires access to
|
||||
// the request header, it creates a race with any writes to the header performed
|
||||
// during request handling. httputil.ReverseProxy is a prominent example for a
|
||||
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
|
||||
// https://github.com/prometheus/client_golang/issues/272.
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { |
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) |
||||
} |
||||
|
||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
||||
// issues).
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
||||
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { |
||||
return InstrumentHandlerFuncWithOpts( |
||||
SummaryOpts{ |
||||
Subsystem: "http", |
||||
ConstLabels: Labels{"handler": handlerName}, |
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, |
||||
}, |
||||
handlerFunc, |
||||
) |
||||
} |
||||
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
||||
// issues) but provides more flexibility (at the cost of a more complex call
|
||||
// syntax). As InstrumentHandler, this function registers four metric
|
||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// help string. The names of the variable labels of the http_requests_total
|
||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
||||
//
|
||||
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
|
||||
// behavior of InstrumentHandler:
|
||||
//
|
||||
// prometheus.InstrumentHandlerWithOpts(
|
||||
// prometheus.SummaryOpts{
|
||||
// Subsystem: "http",
|
||||
// ConstLabels: prometheus.Labels{"handler": handlerName},
|
||||
// },
|
||||
// handler,
|
||||
// )
|
||||
//
|
||||
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
|
||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||
// and all its fields are set to the equally named fields in the provided
|
||||
// SummaryOpts.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
||||
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { |
||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) |
||||
} |
||||
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
||||
// SummaryOpts are used.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
||||
// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { |
||||
reqCnt := NewCounterVec( |
||||
CounterOpts{ |
||||
Namespace: opts.Namespace, |
||||
Subsystem: opts.Subsystem, |
||||
Name: "requests_total", |
||||
Help: "Total number of HTTP requests made.", |
||||
ConstLabels: opts.ConstLabels, |
||||
}, |
||||
instLabels, |
||||
) |
||||
if err := Register(reqCnt); err != nil { |
||||
if are, ok := err.(AlreadyRegisteredError); ok { |
||||
reqCnt = are.ExistingCollector.(*CounterVec) |
||||
} else { |
||||
panic(err) |
||||
} |
||||
} |
||||
|
||||
opts.Name = "request_duration_microseconds" |
||||
opts.Help = "The HTTP request latencies in microseconds." |
||||
reqDur := NewSummary(opts) |
||||
if err := Register(reqDur); err != nil { |
||||
if are, ok := err.(AlreadyRegisteredError); ok { |
||||
reqDur = are.ExistingCollector.(Summary) |
||||
} else { |
||||
panic(err) |
||||
} |
||||
} |
||||
|
||||
opts.Name = "request_size_bytes" |
||||
opts.Help = "The HTTP request sizes in bytes." |
||||
reqSz := NewSummary(opts) |
||||
if err := Register(reqSz); err != nil { |
||||
if are, ok := err.(AlreadyRegisteredError); ok { |
||||
reqSz = are.ExistingCollector.(Summary) |
||||
} else { |
||||
panic(err) |
||||
} |
||||
} |
||||
|
||||
opts.Name = "response_size_bytes" |
||||
opts.Help = "The HTTP response sizes in bytes." |
||||
resSz := NewSummary(opts) |
||||
if err := Register(resSz); err != nil { |
||||
if are, ok := err.(AlreadyRegisteredError); ok { |
||||
resSz = are.ExistingCollector.(Summary) |
||||
} else { |
||||
panic(err) |
||||
} |
||||
} |
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { |
||||
now := time.Now() |
||||
|
||||
delegate := &responseWriterDelegator{ResponseWriter: w} |
||||
out := computeApproximateRequestSize(r) |
||||
|
||||
_, cn := w.(http.CloseNotifier) |
||||
_, fl := w.(http.Flusher) |
||||
_, hj := w.(http.Hijacker) |
||||
_, rf := w.(io.ReaderFrom) |
||||
var rw http.ResponseWriter |
||||
if cn && fl && hj && rf { |
||||
rw = &fancyResponseWriterDelegator{delegate} |
||||
} else { |
||||
rw = delegate |
||||
} |
||||
handlerFunc(rw, r) |
||||
|
||||
elapsed := float64(time.Since(now)) / float64(time.Microsecond) |
||||
|
||||
method := sanitizeMethod(r.Method) |
||||
code := sanitizeCode(delegate.status) |
||||
reqCnt.WithLabelValues(method, code).Inc() |
||||
reqDur.Observe(elapsed) |
||||
resSz.Observe(float64(delegate.written)) |
||||
reqSz.Observe(float64(<-out)) |
||||
}) |
||||
} |
||||
|
||||
func computeApproximateRequestSize(r *http.Request) <-chan int { |
||||
// Get URL length in current goroutine for avoiding a race condition.
|
||||
// HandlerFunc that runs in parallel may modify the URL.
|
||||
s := 0 |
||||
if r.URL != nil { |
||||
s += len(r.URL.String()) |
||||
} |
||||
|
||||
out := make(chan int, 1) |
||||
|
||||
go func() { |
||||
s += len(r.Method) |
||||
s += len(r.Proto) |
||||
for name, values := range r.Header { |
||||
s += len(name) |
||||
for _, value := range values { |
||||
s += len(value) |
||||
} |
||||
} |
||||
s += len(r.Host) |
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 { |
||||
s += int(r.ContentLength) |
||||
} |
||||
out <- s |
||||
close(out) |
||||
}() |
||||
|
||||
return out |
||||
} |
||||
|
||||
type responseWriterDelegator struct { |
||||
http.ResponseWriter |
||||
|
||||
status int |
||||
written int64 |
||||
wroteHeader bool |
||||
} |
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) { |
||||
r.status = code |
||||
r.wroteHeader = true |
||||
r.ResponseWriter.WriteHeader(code) |
||||
} |
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) { |
||||
if !r.wroteHeader { |
||||
r.WriteHeader(http.StatusOK) |
||||
} |
||||
n, err := r.ResponseWriter.Write(b) |
||||
r.written += int64(n) |
||||
return n, err |
||||
} |
||||
|
||||
type fancyResponseWriterDelegator struct { |
||||
*responseWriterDelegator |
||||
} |
||||
|
||||
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { |
||||
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
|
||||
//remove support from client_golang yet.
|
||||
return f.ResponseWriter.(http.CloseNotifier).CloseNotify() |
||||
} |
||||
|
||||
func (f *fancyResponseWriterDelegator) Flush() { |
||||
f.ResponseWriter.(http.Flusher).Flush() |
||||
} |
||||
|
||||
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { |
||||
return f.ResponseWriter.(http.Hijacker).Hijack() |
||||
} |
||||
|
||||
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { |
||||
if !f.wroteHeader { |
||||
f.WriteHeader(http.StatusOK) |
||||
} |
||||
n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) |
||||
f.written += n |
||||
return n, err |
||||
} |
||||
|
||||
func sanitizeMethod(m string) string { |
||||
switch m { |
||||
case "GET", "get": |
||||
return "get" |
||||
case "PUT", "put": |
||||
return "put" |
||||
case "HEAD", "head": |
||||
return "head" |
||||
case "POST", "post": |
||||
return "post" |
||||
case "DELETE", "delete": |
||||
return "delete" |
||||
case "CONNECT", "connect": |
||||
return "connect" |
||||
case "OPTIONS", "options": |
||||
return "options" |
||||
case "NOTIFY", "notify": |
||||
return "notify" |
||||
default: |
||||
return strings.ToLower(m) |
||||
} |
||||
} |
||||
|
||||
func sanitizeCode(s int) string { |
||||
switch s { |
||||
case 100: |
||||
return "100" |
||||
case 101: |
||||
return "101" |
||||
|
||||
case 200: |
||||
return "200" |
||||
case 201: |
||||
return "201" |
||||
case 202: |
||||
return "202" |
||||
case 203: |
||||
return "203" |
||||
case 204: |
||||
return "204" |
||||
case 205: |
||||
return "205" |
||||
case 206: |
||||
return "206" |
||||
|
||||
case 300: |
||||
return "300" |
||||
case 301: |
||||
return "301" |
||||
case 302: |
||||
return "302" |
||||
case 304: |
||||
return "304" |
||||
case 305: |
||||
return "305" |
||||
case 307: |
||||
return "307" |
||||
|
||||
case 400: |
||||
return "400" |
||||
case 401: |
||||
return "401" |
||||
case 402: |
||||
return "402" |
||||
case 403: |
||||
return "403" |
||||
case 404: |
||||
return "404" |
||||
case 405: |
||||
return "405" |
||||
case 406: |
||||
return "406" |
||||
case 407: |
||||
return "407" |
||||
case 408: |
||||
return "408" |
||||
case 409: |
||||
return "409" |
||||
case 410: |
||||
return "410" |
||||
case 411: |
||||
return "411" |
||||
case 412: |
||||
return "412" |
||||
case 413: |
||||
return "413" |
||||
case 414: |
||||
return "414" |
||||
case 415: |
||||
return "415" |
||||
case 416: |
||||
return "416" |
||||
case 417: |
||||
return "417" |
||||
case 418: |
||||
return "418" |
||||
|
||||
case 500: |
||||
return "500" |
||||
case 501: |
||||
return "501" |
||||
case 502: |
||||
return "502" |
||||
case 503: |
||||
return "503" |
||||
case 504: |
||||
return "504" |
||||
case 505: |
||||
return "505" |
||||
|
||||
case 428: |
||||
return "428" |
||||
case 429: |
||||
return "429" |
||||
case 431: |
||||
return "431" |
||||
case 511: |
||||
return "511" |
||||
|
||||
default: |
||||
return strconv.Itoa(s) |
||||
} |
||||
} |
||||
|
||||
// gzipAccepted returns whether the client will accept gzip-encoded content.
|
||||
func gzipAccepted(header http.Header) bool { |
||||
a := header.Get(acceptEncodingHeader) |
||||
parts := strings.Split(a, ",") |
||||
for _, part := range parts { |
||||
part = strings.TrimSpace(part) |
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// httpError removes any content-encoding header and then calls http.Error with
|
||||
// the provided error and http.StatusInternalServerErrer. Error contents is
|
||||
// supposed to be uncompressed plain text. However, same as with a plain
|
||||
// http.Error, any header settings will be void if the header has already been
|
||||
// sent. The error message will still be written to the writer, but it will
|
||||
// probably be of limited use.
|
||||
func httpError(rsp http.ResponseWriter, err error) { |
||||
rsp.Header().Del(contentEncodingHeader) |
||||
http.Error( |
||||
rsp, |
||||
"An error has occurred while serving metrics:\n\n"+err.Error(), |
||||
http.StatusInternalServerError, |
||||
) |
||||
} |
65
vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
generated
vendored
65
vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
generated
vendored
@ -0,0 +1,65 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package prometheus |
||||
|
||||
import ( |
||||
"github.com/prometheus/procfs" |
||||
) |
||||
|
||||
func canCollectProcess() bool { |
||||
_, err := procfs.NewDefaultFS() |
||||
return err == nil |
||||
} |
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) { |
||||
pid, err := c.pidFn() |
||||
if err != nil { |
||||
c.reportError(ch, nil, err) |
||||
return |
||||
} |
||||
|
||||
p, err := procfs.NewProc(pid) |
||||
if err != nil { |
||||
c.reportError(ch, nil, err) |
||||
return |
||||
} |
||||
|
||||
if stat, err := p.Stat(); err == nil { |
||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) |
||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) |
||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) |
||||
if startTime, err := stat.StartTime(); err == nil { |
||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) |
||||
} else { |
||||
c.reportError(ch, c.startTime, err) |
||||
} |
||||
} else { |
||||
c.reportError(ch, nil, err) |
||||
} |
||||
|
||||
if fds, err := p.FileDescriptorsLen(); err == nil { |
||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) |
||||
} else { |
||||
c.reportError(ch, c.openFDs, err) |
||||
} |
||||
|
||||
if limits, err := p.Limits(); err == nil { |
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) |
||||
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) |
||||
} else { |
||||
c.reportError(ch, nil, err) |
||||
} |
||||
} |
112
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
generated
vendored
112
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
generated
vendored
@ -0,0 +1,112 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus |
||||
|
||||
import ( |
||||
"syscall" |
||||
"unsafe" |
||||
|
||||
"golang.org/x/sys/windows" |
||||
) |
||||
|
||||
func canCollectProcess() bool { |
||||
return true |
||||
} |
||||
|
||||
var ( |
||||
modpsapi = syscall.NewLazyDLL("psapi.dll") |
||||
modkernel32 = syscall.NewLazyDLL("kernel32.dll") |
||||
|
||||
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") |
||||
procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") |
||||
) |
||||
|
||||
type processMemoryCounters struct { |
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
|
||||
_ uint32 |
||||
PageFaultCount uint32 |
||||
PeakWorkingSetSize uint64 |
||||
WorkingSetSize uint64 |
||||
QuotaPeakPagedPoolUsage uint64 |
||||
QuotaPagedPoolUsage uint64 |
||||
QuotaPeakNonPagedPoolUsage uint64 |
||||
QuotaNonPagedPoolUsage uint64 |
||||
PagefileUsage uint64 |
||||
PeakPagefileUsage uint64 |
||||
PrivateUsage uint64 |
||||
} |
||||
|
||||
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { |
||||
mem := processMemoryCounters{} |
||||
r1, _, err := procGetProcessMemoryInfo.Call( |
||||
uintptr(handle), |
||||
uintptr(unsafe.Pointer(&mem)), |
||||
uintptr(unsafe.Sizeof(mem)), |
||||
) |
||||
if r1 != 1 { |
||||
return mem, err |
||||
} else { |
||||
return mem, nil |
||||
} |
||||
} |
||||
|
||||
func getProcessHandleCount(handle windows.Handle) (uint32, error) { |
||||
var count uint32 |
||||
r1, _, err := procGetProcessHandleCount.Call( |
||||
uintptr(handle), |
||||
uintptr(unsafe.Pointer(&count)), |
||||
) |
||||
if r1 != 1 { |
||||
return 0, err |
||||
} else { |
||||
return count, nil |
||||
} |
||||
} |
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) { |
||||
h, err := windows.GetCurrentProcess() |
||||
if err != nil { |
||||
c.reportError(ch, nil, err) |
||||
return |
||||
} |
||||
|
||||
var startTime, exitTime, kernelTime, userTime windows.Filetime |
||||
err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) |
||||
if err != nil { |
||||
c.reportError(ch, nil, err) |
||||
return |
||||
} |
||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) |
||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) |
||||
|
||||
mem, err := getProcessMemoryInfo(h) |
||||
if err != nil { |
||||
c.reportError(ch, nil, err) |
||||
return |
||||
} |
||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) |
||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) |
||||
|
||||
handles, err := getProcessHandleCount(h) |
||||
if err != nil { |
||||
c.reportError(ch, nil, err) |
||||
return |
||||
} |
||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) |
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
|
||||
} |
||||
|
||||
func fileTimeToSeconds(ft windows.Filetime) float64 { |
||||
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 |
||||
} |
@ -0,0 +1,85 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io/ioutil" |
||||
"net" |
||||
"strings" |
||||
) |
||||
|
||||
// ARPEntry contains a single row of the columnar data represented in
|
||||
// /proc/net/arp.
|
||||
type ARPEntry struct { |
||||
// IP address
|
||||
IPAddr net.IP |
||||
// MAC address
|
||||
HWAddr net.HardwareAddr |
||||
// Name of the device
|
||||
Device string |
||||
} |
||||
|
||||
// GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
|
||||
// and then return a slice of ARPEntry's.
|
||||
func (fs FS) GatherARPEntries() ([]ARPEntry, error) { |
||||
data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err) |
||||
} |
||||
|
||||
return parseARPEntries(data) |
||||
} |
||||
|
||||
func parseARPEntries(data []byte) ([]ARPEntry, error) { |
||||
lines := strings.Split(string(data), "\n") |
||||
entries := make([]ARPEntry, 0) |
||||
var err error |
||||
const ( |
||||
expectedDataWidth = 6 |
||||
expectedHeaderWidth = 9 |
||||
) |
||||
for _, line := range lines { |
||||
columns := strings.Fields(line) |
||||
width := len(columns) |
||||
|
||||
if width == expectedHeaderWidth || width == 0 { |
||||
continue |
||||
} else if width == expectedDataWidth { |
||||
entry, err := parseARPEntry(columns) |
||||
if err != nil { |
||||
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err) |
||||
} |
||||
entries = append(entries, entry) |
||||
} else { |
||||
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) |
||||
} |
||||
|
||||
} |
||||
|
||||
return entries, err |
||||
} |
||||
|
||||
func parseARPEntry(columns []string) (ARPEntry, error) { |
||||
ip := net.ParseIP(columns[0]) |
||||
mac := net.HardwareAddr(columns[3]) |
||||
|
||||
entry := ARPEntry{ |
||||
IPAddr: ip, |
||||
HWAddr: mac, |
||||
Device: columns[5], |
||||
} |
||||
|
||||
return entry, nil |
||||
} |
@ -0,0 +1,131 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"strconv" |
||||
"strings" |
||||
|
||||
"github.com/prometheus/procfs/internal/util" |
||||
) |
||||
|
||||
// Crypto holds info parsed from /proc/crypto.
|
||||
type Crypto struct { |
||||
Alignmask *uint64 |
||||
Async bool |
||||
Blocksize *uint64 |
||||
Chunksize *uint64 |
||||
Ctxsize *uint64 |
||||
Digestsize *uint64 |
||||
Driver string |
||||
Geniv string |
||||
Internal string |
||||
Ivsize *uint64 |
||||
Maxauthsize *uint64 |
||||
MaxKeysize *uint64 |
||||
MinKeysize *uint64 |
||||
Module string |
||||
Name string |
||||
Priority *int64 |
||||
Refcnt *int64 |
||||
Seedsize *uint64 |
||||
Selftest string |
||||
Type string |
||||
Walksize *uint64 |
||||
} |
||||
|
||||
// Crypto parses an crypto-file (/proc/crypto) and returns a slice of
|
||||
// structs containing the relevant info. More information available here:
|
||||
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
|
||||
func (fs FS) Crypto() ([]Crypto, error) { |
||||
data, err := ioutil.ReadFile(fs.proc.Path("crypto")) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) |
||||
} |
||||
crypto, err := parseCrypto(data) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) |
||||
} |
||||
return crypto, nil |
||||
} |
||||
|
||||
func parseCrypto(cryptoData []byte) ([]Crypto, error) { |
||||
crypto := []Crypto{} |
||||
|
||||
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n")) |
||||
|
||||
for _, block := range cryptoBlocks { |
||||
var newCryptoElem Crypto |
||||
|
||||
lines := strings.Split(string(block), "\n") |
||||
for _, line := range lines { |
||||
if strings.TrimSpace(line) == "" || line[0] == ' ' { |
||||
continue |
||||
} |
||||
fields := strings.Split(line, ":") |
||||
key := strings.TrimSpace(fields[0]) |
||||
value := strings.TrimSpace(fields[1]) |
||||
vp := util.NewValueParser(value) |
||||
|
||||
switch strings.TrimSpace(key) { |
||||
case "async": |
||||
b, err := strconv.ParseBool(value) |
||||
if err == nil { |
||||
newCryptoElem.Async = b |
||||
} |
||||
case "blocksize": |
||||
newCryptoElem.Blocksize = vp.PUInt64() |
||||
case "chunksize": |
||||
newCryptoElem.Chunksize = vp.PUInt64() |
||||
case "digestsize": |
||||
newCryptoElem.Digestsize = vp.PUInt64() |
||||
case "driver": |
||||
newCryptoElem.Driver = value |
||||
case "geniv": |
||||
newCryptoElem.Geniv = value |
||||
case "internal": |
||||
newCryptoElem.Internal = value |
||||
case "ivsize": |
||||
newCryptoElem.Ivsize = vp.PUInt64() |
||||
case "maxauthsize": |
||||
newCryptoElem.Maxauthsize = vp.PUInt64() |
||||
case "max keysize": |
||||
newCryptoElem.MaxKeysize = vp.PUInt64() |
||||
case "min keysize": |
||||
newCryptoElem.MinKeysize = vp.PUInt64() |
||||
case "module": |
||||
newCryptoElem.Module = value |
||||
case "name": |
||||
newCryptoElem.Name = value |
||||
case "priority": |
||||
newCryptoElem.Priority = vp.PInt64() |
||||
case "refcnt": |
||||
newCryptoElem.Refcnt = vp.PInt64() |
||||
case "seedsize": |
||||
newCryptoElem.Seedsize = vp.PUInt64() |
||||
case "selftest": |
||||
newCryptoElem.Selftest = value |
||||
case "type": |
||||
newCryptoElem.Type = value |
||||
case "walksize": |
||||
newCryptoElem.Walksize = vp.PUInt64() |
||||
} |
||||
} |
||||
crypto = append(crypto, newCryptoElem) |
||||
} |
||||
return crypto, nil |
||||
} |
File diff suppressed because it is too large
Load Diff
@ -1,3 +1,6 @@ |
||||
module github.com/prometheus/procfs |
||||
|
||||
require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 |
||||
require ( |
||||
github.com/google/go-cmp v0.3.0 |
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 |
||||
) |
||||
|
@ -1,2 +1,4 @@ |
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= |
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= |
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= |
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= |
||||
|
@ -0,0 +1,88 @@ |
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package util |
||||
|
||||
import ( |
||||
"io/ioutil" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// ParseUint32s parses a slice of strings into a slice of uint32s.
// The first unparseable element aborts the whole conversion.
func ParseUint32s(ss []string) ([]uint32, error) {
	result := make([]uint32, len(ss))
	for i, s := range ss {
		parsed, err := strconv.ParseUint(s, 10, 32)
		if err != nil {
			return nil, err
		}
		result[i] = uint32(parsed)
	}
	return result, nil
}
||||
|
||||
// ParseUint64s parses a slice of strings into a slice of uint64s.
// The first unparseable element aborts the whole conversion.
func ParseUint64s(ss []string) ([]uint64, error) {
	result := make([]uint64, len(ss))
	for i, s := range ss {
		parsed, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}
		result[i] = parsed
	}
	return result, nil
}
||||
|
||||
// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
// The first unparseable element aborts the whole conversion.
func ParsePInt64s(ss []string) ([]*int64, error) {
	result := make([]*int64, len(ss))
	for i, s := range ss {
		// parsed is re-declared each iteration, so every pointer is distinct.
		parsed, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return nil, err
		}
		result[i] = &parsed
	}
	return result, nil
}
||||
|
||||
// ReadUintFromFile reads a file and attempts to parse a uint64 from it,
// ignoring surrounding whitespace.
func ReadUintFromFile(path string) (uint64, error) {
	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	trimmed := strings.TrimSpace(string(contents))
	return strconv.ParseUint(trimmed, 10, 64)
}
||||
|
||||
// ParseBool parses a string into a boolean pointer: "enabled" maps to true,
// "disabled" to false, and anything else to nil.
func ParseBool(b string) *bool {
	var result *bool
	switch b {
	case "enabled":
		v := true
		result = &v
	case "disabled":
		v := false
		result = &v
	}
	return result
}
@ -0,0 +1,45 @@ |
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux,!appengine
|
||||
|
||||
package util |
||||
|
||||
import ( |
||||
"bytes" |
||||
"os" |
||||
"syscall" |
||||
) |
||||
|
||||
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
func SysReadFile(file string) (string, error) {
	handle, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer handle.Close()

	// On some machines, hwmon drivers are broken and return EAGAIN, which
	// makes Go's ioutil.ReadFile implementation poll forever. To either get
	// data or bail out immediately, issue a single raw read via syscall.
	// Note: reads at most 128 bytes.
	buf := make([]byte, 128)
	count, err := syscall.Read(int(handle.Fd()), buf)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(buf[:count])), nil
}
@ -0,0 +1,26 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux,appengine !linux
|
||||
|
||||
package util |
||||
|
||||
import ( |
||||
"fmt" |
||||
) |
||||
|
||||
// SysReadFile is here implemented as a noop for builds that do not support
// the read syscall. For example Windows, or Linux on Google App Engine.
// It always returns an empty string and a "not supported" error.
func SysReadFile(file string) (string, error) {
	return "", fmt.Errorf("not supported on this platform")
}
@ -0,0 +1,77 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package util |
||||
|
||||
import ( |
||||
"strconv" |
||||
) |
||||
|
||||
// TODO(mdlayher): util packages are an anti-pattern and this should be moved
// somewhere else that is more focused in the future.

// A ValueParser enables parsing a single string into a variety of data types
// in a concise and safe way. The Err method must be invoked after invoking
// any other methods to ensure a value was successfully parsed.
type ValueParser struct {
	v   string // raw input string, interpreted on demand by each typed accessor
	err error  // first parse error encountered; sticky until inspected via Err
}
||||
|
||||
// NewValueParser creates a ValueParser using the input string.
|
||||
func NewValueParser(v string) *ValueParser { |
||||
return &ValueParser{v: v} |
||||
} |
||||
|
||||
// PInt64 interprets the underlying value as an int64 and returns a pointer to
|
||||
// that value.
|
||||
func (vp *ValueParser) PInt64() *int64 { |
||||
if vp.err != nil { |
||||
return nil |
||||
} |
||||
|
||||
// A base value of zero makes ParseInt infer the correct base using the
|
||||
// string's prefix, if any.
|
||||
const base = 0 |
||||
v, err := strconv.ParseInt(vp.v, base, 64) |
||||
if err != nil { |
||||
vp.err = err |
||||
return nil |
||||
} |
||||
|
||||
return &v |
||||
} |
||||
|
||||
// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
|
||||
// that value.
|
||||
func (vp *ValueParser) PUInt64() *uint64 { |
||||
if vp.err != nil { |
||||
return nil |
||||
} |
||||
|
||||
// A base value of zero makes ParseInt infer the correct base using the
|
||||
// string's prefix, if any.
|
||||
const base = 0 |
||||
v, err := strconv.ParseUint(vp.v, base, 64) |
||||
if err != nil { |
||||
vp.err = err |
||||
return nil |
||||
} |
||||
|
||||
return &v |
||||
} |
||||
|
||||
// Err returns the last error, if any, encountered by the ValueParser.
// Callers must check it after using the typed accessors, since those report
// failure by returning nil rather than an error.
func (vp *ValueParser) Err() error {
	return vp.err
}
@ -0,0 +1,178 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"bufio" |
||||
"fmt" |
||||
"io" |
||||
"os" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// validOptionalFields enumerates the optional-field tags that may appear in
// a mountinfo line between the sixth field and the "-" separator; any other
// tag is ignored by parseMountInfoString.
var validOptionalFields = map[string]bool{
	"shared":         true,
	"master":         true,
	"propagate_from": true,
	"unbindable":     true,
}
||||
|
||||
// A MountInfo is a type that describes the details and options
// for each mount, parsed from /proc/self/mountinfo.
// The fields of each entry of /proc/self/mountinfo
// are described in the following man page:
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
	// Unique Id for the mount
	MountId int
	// The Id of the parent mount
	ParentId int
	// The value of `st_dev` for the files on this FS
	MajorMinorVer string
	// The pathname of the directory in the FS that forms
	// the root for this mount
	Root string
	// The pathname of the mount point relative to the root
	MountPoint string
	// Mount options; flag-style options map to the empty string
	Options map[string]string
	// Zero or more optional fields (see validOptionalFields); nil when none
	OptionalFields map[string]string
	// The Filesystem type
	FSType string
	// FS specific information or "none"
	Source string
	// Superblock options; flag-style options map to the empty string
	SuperOptions map[string]string
}
||||
|
||||
// getStringSliceElement returns part of the mountinfo line if the index is
// in range, else the supplied default value.
func getStringSliceElement(parts []string, idx int, defaultValue string) string {
	if idx < len(parts) {
		return parts[idx]
	}
	return defaultValue
}
||||
|
||||
// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
|
||||
func parseMountInfo(r io.Reader) ([]*MountInfo, error) { |
||||
mounts := []*MountInfo{} |
||||
scanner := bufio.NewScanner(r) |
||||
for scanner.Scan() { |
||||
mountString := scanner.Text() |
||||
parsedMounts, err := parseMountInfoString(mountString) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
mounts = append(mounts, parsedMounts) |
||||
} |
||||
|
||||
err := scanner.Err() |
||||
return mounts, err |
||||
} |
||||
|
||||
// Parses a mountinfo file line, and converts it to a MountInfo struct.
|
||||
// An important check here is to see if the hyphen separator, as if it does not exist,
|
||||
// it means that the line is malformed.
|
||||
func parseMountInfoString(mountString string) (*MountInfo, error) { |
||||
var err error |
||||
|
||||
// OptionalFields can be zero, hence these checks to ensure we do not populate the wrong values in the wrong spots
|
||||
separatorIndex := strings.Index(mountString, "-") |
||||
if separatorIndex == -1 { |
||||
return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString) |
||||
} |
||||
beforeFields := strings.Fields(mountString[:separatorIndex]) |
||||
afterFields := strings.Fields(mountString[separatorIndex+1:]) |
||||
if (len(beforeFields) + len(afterFields)) < 7 { |
||||
return nil, fmt.Errorf("too few fields") |
||||
} |
||||
|
||||
mount := &MountInfo{ |
||||
MajorMinorVer: getStringSliceElement(beforeFields, 2, ""), |
||||
Root: getStringSliceElement(beforeFields, 3, ""), |
||||
MountPoint: getStringSliceElement(beforeFields, 4, ""), |
||||
Options: mountOptionsParser(getStringSliceElement(beforeFields, 5, "")), |
||||
OptionalFields: nil, |
||||
FSType: getStringSliceElement(afterFields, 0, ""), |
||||
Source: getStringSliceElement(afterFields, 1, ""), |
||||
SuperOptions: mountOptionsParser(getStringSliceElement(afterFields, 2, "")), |
||||
} |
||||
|
||||
mount.MountId, err = strconv.Atoi(getStringSliceElement(beforeFields, 0, "")) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("failed to parse mount ID") |
||||
} |
||||
mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, "")) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("failed to parse parent ID") |
||||
} |
||||
// Has optional fields, which is a space separated list of values.
|
||||
// Example: shared:2 master:7
|
||||
if len(beforeFields) > 6 { |
||||
mount.OptionalFields = make(map[string]string) |
||||
optionalFields := beforeFields[6:] |
||||
for _, field := range optionalFields { |
||||
optionSplit := strings.Split(field, ":") |
||||
target, value := optionSplit[0], "" |
||||
if len(optionSplit) == 2 { |
||||
value = optionSplit[1] |
||||
} |
||||
// Checks if the 'keys' in the optional fields in the mountinfo line are acceptable.
|
||||
// Allowed 'keys' are shared, master, propagate_from, unbindable.
|
||||
if _, ok := validOptionalFields[target]; ok { |
||||
mount.OptionalFields[target] = value |
||||
} |
||||
} |
||||
} |
||||
return mount, nil |
||||
} |
||||
|
||||
// mountOptionsParser parses a comma-separated mount-options or superblock-
// options string into a map; flag-style options map to the empty string.
func mountOptionsParser(mountOptions string) map[string]string {
	opts := make(map[string]string)
	for _, opt := range strings.Split(mountOptions, ",") {
		parts := strings.Split(opt, "=")
		value := ""
		if len(parts) >= 2 {
			value = parts[1]
		}
		opts[parts[0]] = value
	}
	return opts
}
||||
|
||||
// Retrieves mountinfo information from `/proc/self/mountinfo`.
|
||||
func GetMounts() ([]*MountInfo, error) { |
||||
f, err := os.Open("/proc/self/mountinfo") |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer f.Close() |
||||
return parseMountInfo(f) |
||||
} |
||||
|
||||
// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
|
||||
func GetProcMounts(pid int) ([]*MountInfo, error) { |
||||
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer f.Close() |
||||
return parseMountInfo(f) |
||||
} |
@ -0,0 +1,91 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io/ioutil" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// For the proc file format details,
|
||||
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
|
||||
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
|
||||
|
||||
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat.
// Values are decoded from the file's hexadecimal columns (see
// parseSoftnetEntry).
type SoftnetEntry struct {
	// Number of processed packets
	Processed uint
	// Number of dropped packets
	Dropped uint
	// Number of times processing packets ran out of quota
	TimeSqueezed uint
}
||||
|
||||
// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
|
||||
// and then return a slice of SoftnetEntry's.
|
||||
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) { |
||||
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat")) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err) |
||||
} |
||||
|
||||
return parseSoftnetEntries(data) |
||||
} |
||||
|
||||
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) { |
||||
lines := strings.Split(string(data), "\n") |
||||
entries := make([]SoftnetEntry, 0) |
||||
var err error |
||||
const ( |
||||
expectedColumns = 11 |
||||
) |
||||
for _, line := range lines { |
||||
columns := strings.Fields(line) |
||||
width := len(columns) |
||||
if width == 0 { |
||||
continue |
||||
} |
||||
if width != expectedColumns { |
||||
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns) |
||||
} |
||||
var entry SoftnetEntry |
||||
if entry, err = parseSoftnetEntry(columns); err != nil { |
||||
return []SoftnetEntry{}, err |
||||
} |
||||
entries = append(entries, entry) |
||||
} |
||||
|
||||
return entries, nil |
||||
} |
||||
|
||||
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) { |
||||
var err error |
||||
var processed, dropped, timeSqueezed uint64 |
||||
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil { |
||||
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err) |
||||
} |
||||
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil { |
||||
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err) |
||||
} |
||||
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil { |
||||
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err) |
||||
} |
||||
return SoftnetEntry{ |
||||
Processed: uint(processed), |
||||
Dropped: uint(dropped), |
||||
TimeSqueezed: uint(timeSqueezed), |
||||
}, nil |
||||
} |
@ -0,0 +1,275 @@ |
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"bufio" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"os" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// For the proc file format details,
|
||||
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
|
||||
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
|
||||
|
||||
// Column indices of a /proc/net/unix data line.
const (
	netUnixKernelPtrIdx = iota
	netUnixRefCountIdx
	_ // protocol column, not parsed by parseLine
	netUnixFlagsIdx
	netUnixTypeIdx
	netUnixStateIdx
	netUnixInodeIdx

	// Inode and Path are optional.
	netUnixStaticFieldsCnt = 6
)
||||
|
||||
// Known values of the type, flags, and state columns, used by the
// corresponding String methods below.
const (
	netUnixTypeStream    = 1
	netUnixTypeDgram     = 2
	netUnixTypeSeqpacket = 5

	netUnixFlagListen = 1 << 16

	netUnixStateUnconnected  = 1
	netUnixStateConnecting   = 2
	netUnixStateConnected    = 3
	netUnixStateDisconnected = 4
)
||||
|
||||
// errInvalidKernelPtrFmt is returned when the Num column of a line lacks the
// trailing ':' expected by parseKernelPtr.
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
||||
|
||||
// NetUnixType is the type of the type field (see the netUnixType* constants).
type NetUnixType uint64

// NetUnixFlags is the type of the flags field (see netUnixFlagListen).
type NetUnixFlags uint64

// NetUnixState is the type of the state field (see the netUnixState* constants).
type NetUnixState uint64
||||
|
||||
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
	KernelPtr string // kernel table slot number (Num column, ':' stripped)
	RefCount  uint64
	// NOTE(review): Protocol is never assigned by parseLine in this file —
	// confirm whether it is populated elsewhere or dead.
	Protocol uint64
	Flags    NetUnixFlags
	Type     NetUnixType
	State    NetUnixState
	Inode    uint64
	Path     string // bound socket path; optional, empty when absent
}
||||
|
||||
// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
	Rows []*NetUnixLine // one entry per data line, in file order
}
||||
|
||||
// NewNetUnix returns data read from /proc/net/unix.
|
||||
func NewNetUnix() (*NetUnix, error) { |
||||
fs, err := NewFS(DefaultMountPoint) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return fs.NewNetUnix() |
||||
} |
||||
|
||||
// NewNetUnix returns data read from /proc/net/unix, resolved relative to
// this FS's proc mount point.
func (fs FS) NewNetUnix() (*NetUnix, error) {
	return NewNetUnixByPath(fs.proc.Path("net/unix"))
}
||||
|
||||
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
|
||||
// It might returns an error with partial parsed data, if an error occur after some data parsed.
|
||||
func NewNetUnixByPath(path string) (*NetUnix, error) { |
||||
f, err := os.Open(path) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer f.Close() |
||||
return NewNetUnixByReader(f) |
||||
} |
||||
|
||||
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
|
||||
// It might returns an error with partial parsed data, if an error occur after some data parsed.
|
||||
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { |
||||
nu := &NetUnix{ |
||||
Rows: make([]*NetUnixLine, 0, 32), |
||||
} |
||||
scanner := bufio.NewScanner(reader) |
||||
// Omit the header line.
|
||||
scanner.Scan() |
||||
header := scanner.Text() |
||||
// From the man page of proc(5), it does not contain an Inode field,
|
||||
// but in actually it exists.
|
||||
// This code works for both cases.
|
||||
hasInode := strings.Contains(header, "Inode") |
||||
|
||||
minFieldsCnt := netUnixStaticFieldsCnt |
||||
if hasInode { |
||||
minFieldsCnt++ |
||||
} |
||||
for scanner.Scan() { |
||||
line := scanner.Text() |
||||
item, err := nu.parseLine(line, hasInode, minFieldsCnt) |
||||
if err != nil { |
||||
return nu, err |
||||
} |
||||
nu.Rows = append(nu.Rows, item) |
||||
} |
||||
|
||||
return nu, scanner.Err() |
||||
} |
||||
|
||||
// parseLine converts one /proc/net/unix data line into a NetUnixLine.
// hasInode selects between the two known file layouts (with/without the
// Inode column) and minFieldsCnt is the matching minimum field count.
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
	fields := strings.Fields(line)
	fieldsLen := len(fields)
	if fieldsLen < minFieldsCnt {
		return nil, fmt.Errorf(
			"Parse Unix domain failed: expect at least %d fields but got %d",
			minFieldsCnt, fieldsLen)
	}
	kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
	}
	users, err := u.parseUsers(fields[netUnixRefCountIdx])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
	}
	flags, err := u.parseFlags(fields[netUnixFlagsIdx])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
	}
	typ, err := u.parseType(fields[netUnixTypeIdx])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
	}
	state, err := u.parseState(fields[netUnixStateIdx])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
	}
	// The Inode column only exists in the hasInode layout; otherwise the
	// zero value is used.
	var inode uint64
	if hasInode {
		inodeStr := fields[netUnixInodeIdx]
		inode, err = u.parseInode(inodeStr)
		if err != nil {
			return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
		}
	}

	nuLine := &NetUnixLine{
		KernelPtr: kernelPtr,
		RefCount:  users,
		Type:      typ,
		Flags:     flags,
		State:     state,
		Inode:     inode,
	}

	// Path field is optional.
	if fieldsLen > minFieldsCnt {
		pathIdx := netUnixInodeIdx + 1
		// Without an Inode column every later field shifts left by one.
		if !hasInode {
			pathIdx--
		}
		nuLine.Path = fields[pathIdx]
	}

	return nuLine, nil
}
||||
|
||||
func (u NetUnix) parseKernelPtr(str string) (string, error) { |
||||
if !strings.HasSuffix(str, ":") { |
||||
return "", errInvalidKernelPtrFmt |
||||
} |
||||
return str[:len(str)-1], nil |
||||
} |
||||
|
||||
// parseUsers decodes the hexadecimal RefCount column.
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
	return strconv.ParseUint(hexStr, 16, 32)
}
||||
|
||||
// parseProtocol decodes the hexadecimal Protocol column.
// NOTE(review): nothing in this file calls parseProtocol or assigns
// NetUnixLine.Protocol — confirm whether this helper is dead code.
func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
	return strconv.ParseUint(hexStr, 16, 32)
}
||||
|
||||
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { |
||||
typ, err := strconv.ParseUint(hexStr, 16, 16) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
return NetUnixType(typ), nil |
||||
} |
||||
|
||||
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { |
||||
flags, err := strconv.ParseUint(hexStr, 16, 32) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
return NetUnixFlags(flags), nil |
||||
} |
||||
|
||||
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { |
||||
st, err := strconv.ParseInt(hexStr, 16, 8) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
return NetUnixState(st), nil |
||||
} |
||||
|
||||
// parseInode decodes the decimal Inode column.
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
	return strconv.ParseUint(inodeStr, 10, 64)
}
||||
|
||||
func (t NetUnixType) String() string { |
||||
switch t { |
||||
case netUnixTypeStream: |
||||
return "stream" |
||||
case netUnixTypeDgram: |
||||
return "dgram" |
||||
case netUnixTypeSeqpacket: |
||||
return "seqpacket" |
||||
} |
||||
return "unknown" |
||||
} |
||||
|
||||
func (f NetUnixFlags) String() string { |
||||
switch f { |
||||
case netUnixFlagListen: |
||||
return "listen" |
||||
default: |
||||
return "default" |
||||
} |
||||
} |
||||
|
||||
func (s NetUnixState) String() string { |
||||
switch s { |
||||
case netUnixStateUnconnected: |
||||
return "unconnected" |
||||
case netUnixStateConnecting: |
||||
return "connecting" |
||||
case netUnixStateConnected: |
||||
return "connected" |
||||
case netUnixStateDisconnected: |
||||
return "disconnected" |
||||
} |
||||
return "unknown" |
||||
} |
@ -0,0 +1,43 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"io/ioutil" |
||||
"os" |
||||
"strings" |
||||
) |
||||
|
||||
// Environ reads process environments from /proc/<pid>/environ
|
||||
func (p Proc) Environ() ([]string, error) { |
||||
environments := make([]string, 0) |
||||
|
||||
f, err := os.Open(p.path("environ")) |
||||
if err != nil { |
||||
return environments, err |
||||
} |
||||
defer f.Close() |
||||
|
||||
data, err := ioutil.ReadAll(f) |
||||
if err != nil { |
||||
return environments, err |
||||
} |
||||
|
||||
environments = strings.Split(string(data), "\000") |
||||
if len(environments) > 0 { |
||||
environments = environments[:len(environments)-1] |
||||
} |
||||
|
||||
return environments, nil |
||||
} |
@ -0,0 +1,132 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"bufio" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"os" |
||||
"regexp" |
||||
"strings" |
||||
) |
||||
|
||||
// Regexps used to pick individual "key: value" fields out of a
// /proc/<pid>/fdinfo/<fd> file.
var (
	rPos     = regexp.MustCompile(`^pos:\s+(\d+)$`)
	rFlags   = regexp.MustCompile(`^flags:\s+(\d+)$`)
	rMntID   = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
	rInotify = regexp.MustCompile(`^inotify`)
)
||||
|
||||
// ProcFDInfo represents file descriptor information read from
// /proc/<pid>/fdinfo/<fd>.
type ProcFDInfo struct {
	// File descriptor
	FD string
	// File offset
	Pos string
	// File access mode and status flags
	Flags string
	// Mount point ID
	MntID string
	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
	InotifyInfos []InotifyInfo
}
||||
|
||||
// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
|
||||
func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { |
||||
f, err := os.Open(p.path("fdinfo", fd)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer f.Close() |
||||
|
||||
fdinfo, err := ioutil.ReadAll(f) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("could not read %s: %s", f.Name(), err) |
||||
} |
||||
|
||||
var text, pos, flags, mntid string |
||||
var inotify []InotifyInfo |
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(string(fdinfo))) |
||||
for scanner.Scan() { |
||||
text = scanner.Text() |
||||
if rPos.MatchString(text) { |
||||
pos = rPos.FindStringSubmatch(text)[1] |
||||
} else if rFlags.MatchString(text) { |
||||
flags = rFlags.FindStringSubmatch(text)[1] |
||||
} else if rMntID.MatchString(text) { |
||||
mntid = rMntID.FindStringSubmatch(text)[1] |
||||
} else if rInotify.MatchString(text) { |
||||
newInotify, err := parseInotifyInfo(text) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
inotify = append(inotify, *newInotify) |
||||
} |
||||
} |
||||
|
||||
i := &ProcFDInfo{ |
||||
FD: fd, |
||||
Pos: pos, |
||||
Flags: flags, |
||||
MntID: mntid, |
||||
InotifyInfos: inotify, |
||||
} |
||||
|
||||
return i, nil |
||||
} |
||||
|
||||
// inotifyLineRE matches one "inotify ..." line of an fdinfo file. Compiled
// once at package scope instead of on every parseInotifyInfo call.
var inotifyLineRE = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)

// InotifyInfo represents a single inotify line in the fdinfo file.
type InotifyInfo struct {
	// Watch descriptor number
	WD string
	// Inode number
	Ino string
	// Device ID
	Sdev string
	// Mask of events being monitored
	Mask string
}

// parseInotifyInfo parses one inotify line of an fdinfo file. Only available
// on kernel 3.8+. An error is returned when the line does not have the
// expected "inotify wd:... ino:... sdev:... mask:..." shape.
func parseInotifyInfo(line string) (*InotifyInfo, error) {
	m := inotifyLineRE.FindStringSubmatch(line)
	// FindStringSubmatch returns nil when there is no match; indexing it
	// blindly (as the previous version did) panics on malformed input.
	if m == nil {
		return nil, fmt.Errorf("invalid inotify line %q", line)
	}
	return &InotifyInfo{
		WD:   m[1],
		Ino:  m[2],
		Sdev: m[3],
		Mask: m[4],
	}, nil
}
||||
|
||||
// ProcFDInfos represents a list of ProcFDInfo structs.
type ProcFDInfos []ProcFDInfo

// sort.Interface implementation ordering by file descriptor. Note that FD
// is a string, so the order is lexicographic ("10" sorts before "2").
func (p ProcFDInfos) Len() int           { return len(p) }
func (p ProcFDInfos) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
||||
|
||||
// InotifyWatchLen returns the total number of inotify watches
|
||||
func (p ProcFDInfos) InotifyWatchLen() (int, error) { |
||||
length := 0 |
||||
for _, f := range p { |
||||
length += len(f.InotifyInfos) |
||||
} |
||||
|
||||
return length, nil |
||||
} |
@ -0,0 +1,162 @@ |
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"bytes" |
||||
"io/ioutil" |
||||
"os" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// ProcStatus provides status information about the process,
// read from /proc/[pid]/status.
// All memory sizes (Vm*, Rss*, HugetlbPages) are in bytes, converted from
// the kilobyte values reported by the kernel.
type ProcStatus struct {
	// The process ID.
	PID int
	// The process name.
	Name string

	// Peak virtual memory size.
	VmPeak uint64
	// Virtual memory size.
	VmSize uint64
	// Locked memory size.
	VmLck uint64
	// Pinned memory size.
	VmPin uint64
	// Peak resident set size.
	VmHWM uint64
	// Resident set size (sum of RssAnon, RssFile and RssShmem).
	VmRSS uint64
	// Size of resident anonymous memory.
	RssAnon uint64
	// Size of resident file mappings.
	RssFile uint64
	// Size of resident shared memory.
	RssShmem uint64
	// Size of data segments.
	VmData uint64
	// Size of stack segments.
	VmStk uint64
	// Size of text segments.
	VmExe uint64
	// Shared library code size.
	VmLib uint64
	// Page table entries size.
	VmPTE uint64
	// Size of second-level page tables.
	VmPMD uint64
	// Swapped-out virtual memory size by anonymous private.
	VmSwap uint64
	// Size of hugetlb memory portions.
	HugetlbPages uint64

	// Number of voluntary context switches.
	VoluntaryCtxtSwitches uint64
	// Number of involuntary context switches.
	NonVoluntaryCtxtSwitches uint64
}
||||
|
||||
// NewStatus returns the current status information of the process.
|
||||
func (p Proc) NewStatus() (ProcStatus, error) { |
||||
f, err := os.Open(p.path("status")) |
||||
if err != nil { |
||||
return ProcStatus{}, err |
||||
} |
||||
defer f.Close() |
||||
|
||||
data, err := ioutil.ReadAll(f) |
||||
if err != nil { |
||||
return ProcStatus{}, err |
||||
} |
||||
|
||||
s := ProcStatus{PID: p.PID} |
||||
|
||||
lines := strings.Split(string(data), "\n") |
||||
for _, line := range lines { |
||||
if !bytes.Contains([]byte(line), []byte(":")) { |
||||
continue |
||||
} |
||||
|
||||
kv := strings.SplitN(line, ":", 2) |
||||
|
||||
// removes spaces
|
||||
k := string(strings.TrimSpace(kv[0])) |
||||
v := string(strings.TrimSpace(kv[1])) |
||||
// removes "kB"
|
||||
v = string(bytes.Trim([]byte(v), " kB")) |
||||
|
||||
// value to int when possible
|
||||
// we can skip error check here, 'cause vKBytes is not used when value is a string
|
||||
vKBytes, _ := strconv.ParseUint(v, 10, 64) |
||||
// convert kB to B
|
||||
vBytes := vKBytes * 1024 |
||||
|
||||
s.fillStatus(k, v, vKBytes, vBytes) |
||||
} |
||||
|
||||
return s, nil |
||||
} |
||||
|
||||
// fillStatus assigns one parsed key/value pair from /proc/<pid>/status to
// the corresponding struct field. vString is the raw trimmed value, vUint
// the value parsed as an unsigned integer, and vUintBytes that value
// converted from kB to bytes; each case picks whichever representation the
// field needs. Unknown keys are silently ignored.
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
	switch k {
	case "Name":
		s.Name = vString
	case "VmPeak":
		s.VmPeak = vUintBytes
	case "VmSize":
		s.VmSize = vUintBytes
	case "VmLck":
		s.VmLck = vUintBytes
	case "VmPin":
		s.VmPin = vUintBytes
	case "VmHWM":
		s.VmHWM = vUintBytes
	case "VmRSS":
		s.VmRSS = vUintBytes
	case "RssAnon":
		s.RssAnon = vUintBytes
	case "RssFile":
		s.RssFile = vUintBytes
	case "RssShmem":
		s.RssShmem = vUintBytes
	case "VmData":
		s.VmData = vUintBytes
	case "VmStk":
		s.VmStk = vUintBytes
	case "VmExe":
		s.VmExe = vUintBytes
	case "VmLib":
		s.VmLib = vUintBytes
	case "VmPTE":
		s.VmPTE = vUintBytes
	case "VmPMD":
		s.VmPMD = vUintBytes
	case "VmSwap":
		s.VmSwap = vUintBytes
	case "HugetlbPages":
		s.HugetlbPages = vUintBytes
	// The context-switch counters are plain counts, not sizes, so they
	// take the unconverted integer value.
	case "voluntary_ctxt_switches":
		s.VoluntaryCtxtSwitches = vUint
	case "nonvoluntary_ctxt_switches":
		s.NonVoluntaryCtxtSwitches = vUint
	}
}
||||
|
||||
// TotalCtxtSwitches returns the total context switch.
|
||||
func (s ProcStatus) TotalCtxtSwitches() uint64 { |
||||
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches |
||||
} |
@ -0,0 +1,118 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"bufio" |
||||
"errors" |
||||
"os" |
||||
"regexp" |
||||
"strconv" |
||||
) |
||||
|
||||
var (
	// cpuLineRE matches a "cpu<N> ..." line of /proc/schedstat; capture
	// groups 8-10 carry the running/waiting/timeslice counters used below.
	cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
	// procLineRE matches the three counters of /proc/<pid>/schedstat.
	procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
)
||||
|
||||
// Schedstat contains scheduler statistics from /proc/schedstats
|
||||
//
|
||||
// See
|
||||
// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
|
||||
// for a detailed description of what these numbers mean.
|
||||
//
|
||||
// Note the current kernel documentation claims some of the time units are in
|
||||
// jiffies when they are actually in nanoseconds since 2.6.23 with the
|
||||
// introduction of CFS. A fix to the documentation is pending. See
|
||||
// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
|
||||
|
||||
type Schedstat struct {
	// CPUs holds one entry per "cpu<N>" line of /proc/schedstat.
	CPUs []*SchedstatCPU
}

// SchedstatCPU contains the values from one "cpu<N>" line
type SchedstatCPU struct {
	// CPUNum is the CPU index taken from the "cpu<N>" label.
	CPUNum string

	RunningNanoseconds uint64
	WaitingNanoseconds uint64
	RunTimeslices      uint64
}

// ProcSchedstat contains the values from /proc/<pid>/schedstat
type ProcSchedstat struct {
	RunningNanoseconds uint64
	WaitingNanoseconds uint64
	RunTimeslices      uint64
}
||||
|
||||
func (fs FS) Schedstat() (*Schedstat, error) { |
||||
file, err := os.Open(fs.proc.Path("schedstat")) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer file.Close() |
||||
|
||||
stats := &Schedstat{} |
||||
scanner := bufio.NewScanner(file) |
||||
|
||||
for scanner.Scan() { |
||||
match := cpuLineRE.FindStringSubmatch(scanner.Text()) |
||||
if match != nil { |
||||
cpu := &SchedstatCPU{} |
||||
cpu.CPUNum = match[1] |
||||
|
||||
cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) |
||||
if err != nil { |
||||
continue |
||||
} |
||||
|
||||
cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) |
||||
if err != nil { |
||||
continue |
||||
} |
||||
|
||||
cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) |
||||
if err != nil { |
||||
continue |
||||
} |
||||
|
||||
stats.CPUs = append(stats.CPUs, cpu) |
||||
} |
||||
} |
||||
|
||||
return stats, nil |
||||
} |
||||
|
||||
func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { |
||||
match := procLineRE.FindStringSubmatch(contents) |
||||
|
||||
if match != nil { |
||||
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) |
||||
return |
||||
} |
||||
|
||||
err = errors.New("could not parse schedstat") |
||||
return |
||||
} |
@ -0,0 +1,210 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io/ioutil" |
||||
"os" |
||||
"path/filepath" |
||||
"strings" |
||||
|
||||
"github.com/prometheus/procfs/internal/util" |
||||
) |
||||
|
||||
// The VM interface is described at
|
||||
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
|
||||
// Each setting is exposed as a single file.
|
||||
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
|
||||
// and numa_zonelist_order (deprecated) which is a string
|
||||
type VM struct {
	// Fields are pointers so that a nil value can signal that the
	// corresponding file was absent or unreadable when VM() was called.
	AdminReserveKbytes        *int64   // /proc/sys/vm/admin_reserve_kbytes
	BlockDump                 *int64   // /proc/sys/vm/block_dump
	CompactUnevictableAllowed *int64   // /proc/sys/vm/compact_unevictable_allowed
	DirtyBackgroundBytes      *int64   // /proc/sys/vm/dirty_background_bytes
	DirtyBackgroundRatio      *int64   // /proc/sys/vm/dirty_background_ratio
	DirtyBytes                *int64   // /proc/sys/vm/dirty_bytes
	DirtyExpireCentisecs      *int64   // /proc/sys/vm/dirty_expire_centisecs
	DirtyRatio                *int64   // /proc/sys/vm/dirty_ratio
	DirtytimeExpireSeconds    *int64   // /proc/sys/vm/dirtytime_expire_seconds
	DirtyWritebackCentisecs   *int64   // /proc/sys/vm/dirty_writeback_centisecs
	DropCaches                *int64   // /proc/sys/vm/drop_caches
	ExtfragThreshold          *int64   // /proc/sys/vm/extfrag_threshold
	HugetlbShmGroup           *int64   // /proc/sys/vm/hugetlb_shm_group
	LaptopMode                *int64   // /proc/sys/vm/laptop_mode
	LegacyVaLayout            *int64   // /proc/sys/vm/legacy_va_layout
	LowmemReserveRatio        []*int64 // /proc/sys/vm/lowmem_reserve_ratio
	MaxMapCount               *int64   // /proc/sys/vm/max_map_count
	MemoryFailureEarlyKill    *int64   // /proc/sys/vm/memory_failure_early_kill
	MemoryFailureRecovery     *int64   // /proc/sys/vm/memory_failure_recovery
	MinFreeKbytes             *int64   // /proc/sys/vm/min_free_kbytes
	MinSlabRatio              *int64   // /proc/sys/vm/min_slab_ratio
	MinUnmappedRatio          *int64   // /proc/sys/vm/min_unmapped_ratio
	MmapMinAddr               *int64   // /proc/sys/vm/mmap_min_addr
	NrHugepages               *int64   // /proc/sys/vm/nr_hugepages
	NrHugepagesMempolicy      *int64   // /proc/sys/vm/nr_hugepages_mempolicy
	NrOvercommitHugepages     *int64   // /proc/sys/vm/nr_overcommit_hugepages
	NumaStat                  *int64   // /proc/sys/vm/numa_stat
	NumaZonelistOrder         string   // /proc/sys/vm/numa_zonelist_order
	OomDumpTasks              *int64   // /proc/sys/vm/oom_dump_tasks
	OomKillAllocatingTask     *int64   // /proc/sys/vm/oom_kill_allocating_task
	OvercommitKbytes          *int64   // /proc/sys/vm/overcommit_kbytes
	OvercommitMemory          *int64   // /proc/sys/vm/overcommit_memory
	OvercommitRatio           *int64   // /proc/sys/vm/overcommit_ratio
	PageCluster               *int64   // /proc/sys/vm/page-cluster
	PanicOnOom                *int64   // /proc/sys/vm/panic_on_oom
	PercpuPagelistFraction    *int64   // /proc/sys/vm/percpu_pagelist_fraction
	StatInterval              *int64   // /proc/sys/vm/stat_interval
	Swappiness                *int64   // /proc/sys/vm/swappiness
	UserReserveKbytes         *int64   // /proc/sys/vm/user_reserve_kbytes
	VfsCachePressure          *int64   // /proc/sys/vm/vfs_cache_pressure
	WatermarkBoostFactor      *int64   // /proc/sys/vm/watermark_boost_factor
	WatermarkScaleFactor      *int64   // /proc/sys/vm/watermark_scale_factor
	ZoneReclaimMode           *int64   // /proc/sys/vm/zone_reclaim_mode
}
||||
|
||||
// VM reads the VM statistics from the specified `proc` filesystem.
// Each file under /proc/sys/vm is read individually; files that cannot be
// read are skipped, leaving the corresponding field nil.
func (fs FS) VM() (*VM, error) {
	path := fs.proc.Path("sys/vm")
	file, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if !file.Mode().IsDir() {
		return nil, fmt.Errorf("%s is not a directory", path)
	}

	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}

	var vm VM
	for _, f := range files {
		if f.IsDir() {
			continue
		}

		name := filepath.Join(path, f.Name())
		// Ignore errors on read: some files in /proc/sys/vm are
		// write-only.
		value, err := util.SysReadFile(name)
		if err != nil {
			continue
		}
		vp := util.NewValueParser(value)

		// Map each file name onto its struct field. lowmem_reserve_ratio
		// holds a space-separated list and is parsed element-wise;
		// numa_zonelist_order is kept as a raw string.
		switch f.Name() {
		case "admin_reserve_kbytes":
			vm.AdminReserveKbytes = vp.PInt64()
		case "block_dump":
			vm.BlockDump = vp.PInt64()
		case "compact_unevictable_allowed":
			vm.CompactUnevictableAllowed = vp.PInt64()
		case "dirty_background_bytes":
			vm.DirtyBackgroundBytes = vp.PInt64()
		case "dirty_background_ratio":
			vm.DirtyBackgroundRatio = vp.PInt64()
		case "dirty_bytes":
			vm.DirtyBytes = vp.PInt64()
		case "dirty_expire_centisecs":
			vm.DirtyExpireCentisecs = vp.PInt64()
		case "dirty_ratio":
			vm.DirtyRatio = vp.PInt64()
		case "dirtytime_expire_seconds":
			vm.DirtytimeExpireSeconds = vp.PInt64()
		case "dirty_writeback_centisecs":
			vm.DirtyWritebackCentisecs = vp.PInt64()
		case "drop_caches":
			vm.DropCaches = vp.PInt64()
		case "extfrag_threshold":
			vm.ExtfragThreshold = vp.PInt64()
		case "hugetlb_shm_group":
			vm.HugetlbShmGroup = vp.PInt64()
		case "laptop_mode":
			vm.LaptopMode = vp.PInt64()
		case "legacy_va_layout":
			vm.LegacyVaLayout = vp.PInt64()
		case "lowmem_reserve_ratio":
			stringSlice := strings.Fields(value)
			pint64Slice := make([]*int64, 0, len(stringSlice))
			for _, value := range stringSlice {
				vp := util.NewValueParser(value)
				pint64Slice = append(pint64Slice, vp.PInt64())
			}
			vm.LowmemReserveRatio = pint64Slice
		case "max_map_count":
			vm.MaxMapCount = vp.PInt64()
		case "memory_failure_early_kill":
			vm.MemoryFailureEarlyKill = vp.PInt64()
		case "memory_failure_recovery":
			vm.MemoryFailureRecovery = vp.PInt64()
		case "min_free_kbytes":
			vm.MinFreeKbytes = vp.PInt64()
		case "min_slab_ratio":
			vm.MinSlabRatio = vp.PInt64()
		case "min_unmapped_ratio":
			vm.MinUnmappedRatio = vp.PInt64()
		case "mmap_min_addr":
			vm.MmapMinAddr = vp.PInt64()
		case "nr_hugepages":
			vm.NrHugepages = vp.PInt64()
		case "nr_hugepages_mempolicy":
			vm.NrHugepagesMempolicy = vp.PInt64()
		case "nr_overcommit_hugepages":
			vm.NrOvercommitHugepages = vp.PInt64()
		case "numa_stat":
			vm.NumaStat = vp.PInt64()
		case "numa_zonelist_order":
			vm.NumaZonelistOrder = value
		case "oom_dump_tasks":
			vm.OomDumpTasks = vp.PInt64()
		case "oom_kill_allocating_task":
			vm.OomKillAllocatingTask = vp.PInt64()
		case "overcommit_kbytes":
			vm.OvercommitKbytes = vp.PInt64()
		case "overcommit_memory":
			vm.OvercommitMemory = vp.PInt64()
		case "overcommit_ratio":
			vm.OvercommitRatio = vp.PInt64()
		case "page-cluster":
			vm.PageCluster = vp.PInt64()
		case "panic_on_oom":
			vm.PanicOnOom = vp.PInt64()
		case "percpu_pagelist_fraction":
			vm.PercpuPagelistFraction = vp.PInt64()
		case "stat_interval":
			vm.StatInterval = vp.PInt64()
		case "swappiness":
			vm.Swappiness = vp.PInt64()
		case "user_reserve_kbytes":
			vm.UserReserveKbytes = vp.PInt64()
		case "vfs_cache_pressure":
			vm.VfsCachePressure = vp.PInt64()
		case "watermark_boost_factor":
			vm.WatermarkBoostFactor = vp.PInt64()
		case "watermark_scale_factor":
			vm.WatermarkScaleFactor = vp.PInt64()
		case "zone_reclaim_mode":
			vm.ZoneReclaimMode = vp.PInt64()
		}
		// A malformed value in any recognized file aborts the read.
		if err := vp.Err(); err != nil {
			return nil, err
		}
	}

	return &vm, nil
}
@ -0,0 +1,196 @@ |
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package procfs |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"regexp" |
||||
"strings" |
||||
|
||||
"github.com/prometheus/procfs/internal/util" |
||||
) |
||||
|
||||
// Zoneinfo holds info parsed from /proc/zoneinfo.
// Each pointer field mirrors the /proc/zoneinfo counter of the same
// (snake_case) name; a nil pointer means the counter did not appear in
// that zone's block.
type Zoneinfo struct {
	Node string
	Zone string

	NrFreePages *int64
	Min         *int64
	Low         *int64
	High        *int64
	Scanned     *int64
	Spanned     *int64
	Present     *int64
	Managed     *int64

	NrActiveAnon               *int64
	NrInactiveAnon             *int64
	NrIsolatedAnon             *int64
	NrAnonPages                *int64
	NrAnonTransparentHugepages *int64

	NrActiveFile   *int64
	NrInactiveFile *int64
	NrIsolatedFile *int64
	NrFilePages    *int64

	NrSlabReclaimable   *int64
	NrSlabUnreclaimable *int64

	NrMlockStack  *int64
	NrKernelStack *int64
	NrMapped      *int64
	NrDirty       *int64
	NrWriteback   *int64
	NrUnevictable *int64
	NrShmem       *int64
	NrDirtied     *int64
	NrWritten     *int64

	NumaHit        *int64
	NumaMiss       *int64
	NumaForeign    *int64
	NumaInterleave *int64
	NumaLocal      *int64
	NumaOther      *int64

	// Protection holds the parsed "protection: (a, b, ...)" array.
	Protection []*int64
}
||||
|
||||
// nodeZoneRE extracts the node number and zone name from a
// "Node <N>, zone <name>" header line of /proc/zoneinfo.
var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
||||
|
||||
// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of
|
||||
// structs containing the relevant info. More information available here:
|
||||
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
|
||||
func (fs FS) Zoneinfo() ([]Zoneinfo, error) { |
||||
data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) |
||||
} |
||||
zoneinfo, err := parseZoneinfo(data) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) |
||||
} |
||||
return zoneinfo, nil |
||||
} |
||||
|
||||
// parseZoneinfo splits the raw /proc/zoneinfo contents into per-zone
// blocks (separated by "\nNode" headers) and parses the known counters of
// each block into one Zoneinfo element. Unrecognized lines are ignored.
// The error result is always nil and is kept for interface stability.
func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {

	zoneinfo := []Zoneinfo{}

	zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
	for _, block := range zoneinfoBlocks {
		var zoneinfoElement Zoneinfo
		lines := strings.Split(string(block), "\n")
		for _, line := range lines {

			// "Node <N>, zone <name>" header: record node and zone.
			if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
				zoneinfoElement.Node = nodeZone[1]
				zoneinfoElement.Zone = nodeZone[2]
				continue
			}
			// "per-node stats" section applies to the node as a whole,
			// so the zone name is cleared for this element.
			if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
				zoneinfoElement.Zone = ""
				continue
			}
			parts := strings.Fields(strings.TrimSpace(line))
			if len(parts) < 2 {
				continue
			}
			vp := util.NewValueParser(parts[1])
			switch parts[0] {
			case "nr_free_pages":
				zoneinfoElement.NrFreePages = vp.PInt64()
			case "min":
				zoneinfoElement.Min = vp.PInt64()
			case "low":
				zoneinfoElement.Low = vp.PInt64()
			case "high":
				zoneinfoElement.High = vp.PInt64()
			case "scanned":
				zoneinfoElement.Scanned = vp.PInt64()
			case "spanned":
				zoneinfoElement.Spanned = vp.PInt64()
			case "present":
				zoneinfoElement.Present = vp.PInt64()
			case "managed":
				zoneinfoElement.Managed = vp.PInt64()
			case "nr_active_anon":
				zoneinfoElement.NrActiveAnon = vp.PInt64()
			case "nr_inactive_anon":
				zoneinfoElement.NrInactiveAnon = vp.PInt64()
			case "nr_isolated_anon":
				zoneinfoElement.NrIsolatedAnon = vp.PInt64()
			case "nr_anon_pages":
				zoneinfoElement.NrAnonPages = vp.PInt64()
			case "nr_anon_transparent_hugepages":
				zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
			case "nr_active_file":
				zoneinfoElement.NrActiveFile = vp.PInt64()
			case "nr_inactive_file":
				zoneinfoElement.NrInactiveFile = vp.PInt64()
			case "nr_isolated_file":
				zoneinfoElement.NrIsolatedFile = vp.PInt64()
			case "nr_file_pages":
				zoneinfoElement.NrFilePages = vp.PInt64()
			case "nr_slab_reclaimable":
				zoneinfoElement.NrSlabReclaimable = vp.PInt64()
			case "nr_slab_unreclaimable":
				zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
			case "nr_mlock_stack":
				zoneinfoElement.NrMlockStack = vp.PInt64()
			case "nr_kernel_stack":
				zoneinfoElement.NrKernelStack = vp.PInt64()
			case "nr_mapped":
				zoneinfoElement.NrMapped = vp.PInt64()
			case "nr_dirty":
				zoneinfoElement.NrDirty = vp.PInt64()
			case "nr_writeback":
				zoneinfoElement.NrWriteback = vp.PInt64()
			case "nr_unevictable":
				zoneinfoElement.NrUnevictable = vp.PInt64()
			case "nr_shmem":
				zoneinfoElement.NrShmem = vp.PInt64()
			case "nr_dirtied":
				zoneinfoElement.NrDirtied = vp.PInt64()
			case "nr_written":
				zoneinfoElement.NrWritten = vp.PInt64()
			case "numa_hit":
				zoneinfoElement.NumaHit = vp.PInt64()
			case "numa_miss":
				zoneinfoElement.NumaMiss = vp.PInt64()
			case "numa_foreign":
				zoneinfoElement.NumaForeign = vp.PInt64()
			case "numa_interleave":
				zoneinfoElement.NumaInterleave = vp.PInt64()
			case "numa_local":
				zoneinfoElement.NumaLocal = vp.PInt64()
			case "numa_other":
				zoneinfoElement.NumaOther = vp.PInt64()
			case "protection:":
				// "protection: (a, b, ...)" — strip the parentheses and
				// parse the comma-separated values; a parse failure
				// simply leaves Protection unset.
				protectionParts := strings.Split(line, ":")
				protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
				protectionValues = strings.Replace(protectionValues, ")", "", 1)
				protectionValues = strings.TrimSpace(protectionValues)
				protectionStringMap := strings.Split(protectionValues, ", ")
				val, err := util.ParsePInt64s(protectionStringMap)
				if err == nil {
					zoneinfoElement.Protection = val
				}
			}

		}

		zoneinfo = append(zoneinfo, zoneinfoElement)
	}
	return zoneinfo, nil
}
@ -0,0 +1,309 @@ |
||||
package assert |
||||
|
||||
import ( |
||||
"fmt" |
||||
"reflect" |
||||
) |
||||
|
||||
func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { |
||||
switch kind { |
||||
case reflect.Int: |
||||
{ |
||||
intobj1 := obj1.(int) |
||||
intobj2 := obj2.(int) |
||||
if intobj1 > intobj2 { |
||||
return -1, true |
||||
} |
||||
if intobj1 == intobj2 { |
||||
return 0, true |
||||
} |
||||
if intobj1 < intobj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Int8: |
||||
{ |
||||
int8obj1 := obj1.(int8) |
||||
int8obj2 := obj2.(int8) |
||||
if int8obj1 > int8obj2 { |
||||
return -1, true |
||||
} |
||||
if int8obj1 == int8obj2 { |
||||
return 0, true |
||||
} |
||||
if int8obj1 < int8obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Int16: |
||||
{ |
||||
int16obj1 := obj1.(int16) |
||||
int16obj2 := obj2.(int16) |
||||
if int16obj1 > int16obj2 { |
||||
return -1, true |
||||
} |
||||
if int16obj1 == int16obj2 { |
||||
return 0, true |
||||
} |
||||
if int16obj1 < int16obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Int32: |
||||
{ |
||||
int32obj1 := obj1.(int32) |
||||
int32obj2 := obj2.(int32) |
||||
if int32obj1 > int32obj2 { |
||||
return -1, true |
||||
} |
||||
if int32obj1 == int32obj2 { |
||||
return 0, true |
||||
} |
||||
if int32obj1 < int32obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Int64: |
||||
{ |
||||
int64obj1 := obj1.(int64) |
||||
int64obj2 := obj2.(int64) |
||||
if int64obj1 > int64obj2 { |
||||
return -1, true |
||||
} |
||||
if int64obj1 == int64obj2 { |
||||
return 0, true |
||||
} |
||||
if int64obj1 < int64obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Uint: |
||||
{ |
||||
uintobj1 := obj1.(uint) |
||||
uintobj2 := obj2.(uint) |
||||
if uintobj1 > uintobj2 { |
||||
return -1, true |
||||
} |
||||
if uintobj1 == uintobj2 { |
||||
return 0, true |
||||
} |
||||
if uintobj1 < uintobj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Uint8: |
||||
{ |
||||
uint8obj1 := obj1.(uint8) |
||||
uint8obj2 := obj2.(uint8) |
||||
if uint8obj1 > uint8obj2 { |
||||
return -1, true |
||||
} |
||||
if uint8obj1 == uint8obj2 { |
||||
return 0, true |
||||
} |
||||
if uint8obj1 < uint8obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Uint16: |
||||
{ |
||||
uint16obj1 := obj1.(uint16) |
||||
uint16obj2 := obj2.(uint16) |
||||
if uint16obj1 > uint16obj2 { |
||||
return -1, true |
||||
} |
||||
if uint16obj1 == uint16obj2 { |
||||
return 0, true |
||||
} |
||||
if uint16obj1 < uint16obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Uint32: |
||||
{ |
||||
uint32obj1 := obj1.(uint32) |
||||
uint32obj2 := obj2.(uint32) |
||||
if uint32obj1 > uint32obj2 { |
||||
return -1, true |
||||
} |
||||
if uint32obj1 == uint32obj2 { |
||||
return 0, true |
||||
} |
||||
if uint32obj1 < uint32obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Uint64: |
||||
{ |
||||
uint64obj1 := obj1.(uint64) |
||||
uint64obj2 := obj2.(uint64) |
||||
if uint64obj1 > uint64obj2 { |
||||
return -1, true |
||||
} |
||||
if uint64obj1 == uint64obj2 { |
||||
return 0, true |
||||
} |
||||
if uint64obj1 < uint64obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Float32: |
||||
{ |
||||
float32obj1 := obj1.(float32) |
||||
float32obj2 := obj2.(float32) |
||||
if float32obj1 > float32obj2 { |
||||
return -1, true |
||||
} |
||||
if float32obj1 == float32obj2 { |
||||
return 0, true |
||||
} |
||||
if float32obj1 < float32obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.Float64: |
||||
{ |
||||
float64obj1 := obj1.(float64) |
||||
float64obj2 := obj2.(float64) |
||||
if float64obj1 > float64obj2 { |
||||
return -1, true |
||||
} |
||||
if float64obj1 == float64obj2 { |
||||
return 0, true |
||||
} |
||||
if float64obj1 < float64obj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
case reflect.String: |
||||
{ |
||||
stringobj1 := obj1.(string) |
||||
stringobj2 := obj2.(string) |
||||
if stringobj1 > stringobj2 { |
||||
return -1, true |
||||
} |
||||
if stringobj1 == stringobj2 { |
||||
return 0, true |
||||
} |
||||
if stringobj1 < stringobj2 { |
||||
return 1, true |
||||
} |
||||
} |
||||
} |
||||
|
||||
return 0, false |
||||
} |
||||
|
||||
// Greater asserts that the first element is greater than the second
|
||||
//
|
||||
// assert.Greater(t, 2, 1)
|
||||
// assert.Greater(t, float64(2), float64(1))
|
||||
// assert.Greater(t, "b", "a")
|
||||
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { |
||||
if h, ok := t.(tHelper); ok { |
||||
h.Helper() |
||||
} |
||||
|
||||
e1Kind := reflect.ValueOf(e1).Kind() |
||||
e2Kind := reflect.ValueOf(e2).Kind() |
||||
if e1Kind != e2Kind { |
||||
return Fail(t, "Elements should be the same type", msgAndArgs...) |
||||
} |
||||
|
||||
res, isComparable := compare(e1, e2, e1Kind) |
||||
if !isComparable { |
||||
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) |
||||
} |
||||
|
||||
if res != -1 { |
||||
return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) |
||||
} |
||||
|
||||
return true |
||||
} |
||||
|
||||
// GreaterOrEqual asserts that the first element is greater than or equal to the second
|
||||
//
|
||||
// assert.GreaterOrEqual(t, 2, 1)
|
||||
// assert.GreaterOrEqual(t, 2, 2)
|
||||
// assert.GreaterOrEqual(t, "b", "a")
|
||||
// assert.GreaterOrEqual(t, "b", "b")
|
||||
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { |
||||
if h, ok := t.(tHelper); ok { |
||||
h.Helper() |
||||
} |
||||
|
||||
e1Kind := reflect.ValueOf(e1).Kind() |
||||
e2Kind := reflect.ValueOf(e2).Kind() |
||||
if e1Kind != e2Kind { |
||||
return Fail(t, "Elements should be the same type", msgAndArgs...) |
||||
} |
||||
|
||||
res, isComparable := compare(e1, e2, e1Kind) |
||||
if !isComparable { |
||||
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) |
||||
} |
||||
|
||||
if res != -1 && res != 0 { |
||||
return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) |
||||
} |
||||
|
||||
return true |
||||
} |
||||
|
||||
// Less asserts that the first element is less than the second
|
||||
//
|
||||
// assert.Less(t, 1, 2)
|
||||
// assert.Less(t, float64(1), float64(2))
|
||||
// assert.Less(t, "a", "b")
|
||||
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { |
||||
if h, ok := t.(tHelper); ok { |
||||
h.Helper() |
||||
} |
||||
|
||||
e1Kind := reflect.ValueOf(e1).Kind() |
||||
e2Kind := reflect.ValueOf(e2).Kind() |
||||
if e1Kind != e2Kind { |
||||
return Fail(t, "Elements should be the same type", msgAndArgs...) |
||||
} |
||||
|
||||
res, isComparable := compare(e1, e2, e1Kind) |
||||
if !isComparable { |
||||
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) |
||||
} |
||||
|
||||
if res != 1 { |
||||
return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) |
||||
} |
||||
|
||||
return true |
||||
} |
||||
|
||||
// LessOrEqual asserts that the first element is less than or equal to the second
|
||||
//
|
||||
// assert.LessOrEqual(t, 1, 2)
|
||||
// assert.LessOrEqual(t, 2, 2)
|
||||
// assert.LessOrEqual(t, "a", "b")
|
||||
// assert.LessOrEqual(t, "b", "b")
|
||||
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { |
||||
if h, ok := t.(tHelper); ok { |
||||
h.Helper() |
||||
} |
||||
|
||||
e1Kind := reflect.ValueOf(e1).Kind() |
||||
e2Kind := reflect.ValueOf(e2).Kind() |
||||
if e1Kind != e2Kind { |
||||
return Fail(t, "Elements should be the same type", msgAndArgs...) |
||||
} |
||||
|
||||
res, isComparable := compare(e1, e2, e1Kind) |
||||
if !isComparable { |
||||
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) |
||||
} |
||||
|
||||
if res != 1 && res != 0 { |
||||
return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) |
||||
} |
||||
|
||||
return true |
||||
} |
File diff suppressed because it is too large
Load Diff
@ -1,6 +1,6 @@ |
||||
{{.Comment}} |
||||
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { |
||||
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } |
||||
if h, ok := t.(tHelper); ok { h.Helper() } |
||||
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } |
||||
t.FailNow() |
||||
} |
||||
|
Loading…
Reference in new issue