workaround broken drone build (#7362)

* workaround broken swagger: only master branch is not working, the latest release seems to work
  Signed-off-by: Michael Gnehr <michael@gnehr.de>
* make vendor
  Signed-off-by: Michael Gnehr <michael@gnehr.de>
* Don't export GO111MODULE
* set go-swagger to the fixed release version mentioned here: https://github.com/go-gitea/gitea/pull/7362#discussion_r300831537
  Signed-off-by: Michael Gnehr <michael@gnehr.de>
parent 49ee9d2771
commit 86750325c7
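Reviewer note: the actual pin lives in Gitea's Makefile (see the linked discussion). Purely as a hypothetical illustration of the general technique of pinning a code-generation tool to a released version instead of tracking master, a tools.go file can record the dependency so that go.mod fixes its version:

// +build tools

// Package tools exists only to pin build-time tool dependencies in go.mod.
// Hypothetical sketch; this is not the change made in this commit.
package tools

import (
	_ "github.com/go-swagger/go-swagger/cmd/swagger"
)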
@@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import "errors"

// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }

// Deprecated: do not use.
func GetStats() Stats { return Stats{} }

// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go (generated, vendored, 181 lines)
@@ -1,181 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.8

package promhttp

import (
	"io"
	"net/http"
)

type pusherDelegator struct{ *responseWriterDelegator }

func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
	return d.ResponseWriter.(http.Pusher).Push(target, opts)
}

func init() {
	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
		return pusherDelegator{d}
	}
	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Flusher
		}{d, pusherDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
		}{d, pusherDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.CloseNotifier
		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.Flusher
		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
		}{d, pusherDelegator{d}, readerFromDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Flusher
		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.Flusher
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
}

func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
	d := &responseWriterDelegator{
		ResponseWriter:     w,
		observeWriteHeader: observeWriteHeaderFunc,
	}

	id := 0
	if _, ok := w.(http.CloseNotifier); ok {
		id += closeNotifier
	}
	if _, ok := w.(http.Flusher); ok {
		id += flusher
	}
	if _, ok := w.(http.Hijacker); ok {
		id += hijacker
	}
	if _, ok := w.(io.ReaderFrom); ok {
		id += readerFrom
	}
	if _, ok := w.(http.Pusher); ok {
		id += pusher
	}

	return pickDelegator[id](d)
}
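Reviewer note: the numbered pickDelegator indices above are a bitmask of the optional interfaces the wrapped ResponseWriter implements. A minimal standalone sketch of that idea, assuming the library's capability constants are closeNotifier=1, flusher=2, hijacker=4, readerFrom=8, pusher=16 (as the "// 16" ... "// 31" comments suggest):

package main

import "fmt"

// Hypothetical re-statement of the capability bits used by promhttp's delegator table.
const (
	closeNotifier = 1 << iota // 1
	flusher                   // 2
	hijacker                  // 4
	readerFrom                // 8
	pusher                    // 16
)

func main() {
	// A writer supporting Push, Flush and CloseNotify selects delegator 16+2+1 = 19,
	// matching the "// 19" case registered in init() above.
	id := pusher + flusher + closeNotifier
	fmt.Println(id) // 19
}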
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go (generated, vendored, 44 lines)
@@ -1,44 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.8

package promhttp

import (
	"io"
	"net/http"
)

func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
	d := &responseWriterDelegator{
		ResponseWriter:     w,
		observeWriteHeader: observeWriteHeaderFunc,
	}

	id := 0
	if _, ok := w.(http.CloseNotifier); ok {
		id += closeNotifier
	}
	if _, ok := w.(http.Flusher); ok {
		id += flusher
	}
	if _, ok := w.(http.Hijacker); ok {
		id += hijacker
	}
	if _, ok := w.(io.ReaderFrom); ok {
		id += readerFrom
	}

	return pickDelegator[id](d)
}
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (generated, vendored, 122 lines)
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go (generated, vendored, 144 lines)
@@ -1,144 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.8

package promhttp

import (
	"context"
	"crypto/tls"
	"net/http"
	"net/http/httptrace"
	"time"
)

// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the http request. A user
// may choose to use separately buckets Histograms, or implement custom
// instance labels on a per function basis.
type InstrumentTrace struct {
	GotConn              func(float64)
	PutIdleConn          func(float64)
	GotFirstResponseByte func(float64)
	Got100Continue       func(float64)
	DNSStart             func(float64)
	DNSDone              func(float64)
	ConnectStart         func(float64)
	ConnectDone          func(float64)
	TLSHandshakeStart    func(float64)
	TLSHandshakeDone     func(float64)
	WroteHeaders         func(float64)
	Wait100Continue      func(float64)
	WroteRequest         func(float64)
}

// InstrumentRoundTripperTrace is a middleware that wraps the provided
// RoundTripper and reports times to hook functions provided in the
// InstrumentTrace struct. Hook functions that are not present in the provided
// InstrumentTrace struct are ignored. Times reported to the hook functions are
// time since the start of the request. Only with Go1.9+, those times are
// guaranteed to never be negative. (Earlier Go versions are not using a
// monotonic clock.) Note that partitioning of Histograms is expensive and
// should be used judiciously.
//
// For hook functions that receive an error as an argument, no observations are
// made in the event of a non-nil error value.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
		start := time.Now()

		trace := &httptrace.ClientTrace{
			GotConn: func(_ httptrace.GotConnInfo) {
				if it.GotConn != nil {
					it.GotConn(time.Since(start).Seconds())
				}
			},
			PutIdleConn: func(err error) {
				if err != nil {
					return
				}
				if it.PutIdleConn != nil {
					it.PutIdleConn(time.Since(start).Seconds())
				}
			},
			DNSStart: func(_ httptrace.DNSStartInfo) {
				if it.DNSStart != nil {
					it.DNSStart(time.Since(start).Seconds())
				}
			},
			DNSDone: func(_ httptrace.DNSDoneInfo) {
				if it.DNSDone != nil {
					it.DNSDone(time.Since(start).Seconds())
				}
			},
			ConnectStart: func(_, _ string) {
				if it.ConnectStart != nil {
					it.ConnectStart(time.Since(start).Seconds())
				}
			},
			ConnectDone: func(_, _ string, err error) {
				if err != nil {
					return
				}
				if it.ConnectDone != nil {
					it.ConnectDone(time.Since(start).Seconds())
				}
			},
			GotFirstResponseByte: func() {
				if it.GotFirstResponseByte != nil {
					it.GotFirstResponseByte(time.Since(start).Seconds())
				}
			},
			Got100Continue: func() {
				if it.Got100Continue != nil {
					it.Got100Continue(time.Since(start).Seconds())
				}
			},
			TLSHandshakeStart: func() {
				if it.TLSHandshakeStart != nil {
					it.TLSHandshakeStart(time.Since(start).Seconds())
				}
			},
			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
				if err != nil {
					return
				}
				if it.TLSHandshakeDone != nil {
					it.TLSHandshakeDone(time.Since(start).Seconds())
				}
			},
			WroteHeaders: func() {
				if it.WroteHeaders != nil {
					it.WroteHeaders(time.Since(start).Seconds())
				}
			},
			Wait100Continue: func() {
				if it.Wait100Continue != nil {
					it.Wait100Continue(time.Since(start).Seconds())
				}
			},
			WroteRequest: func(_ httptrace.WroteRequestInfo) {
				if it.WroteRequest != nil {
					it.WroteRequest(time.Since(start).Seconds())
				}
			},
		}
		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))

		return next.RoundTrip(r)
	})
}
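For context on the file removed above, a minimal usage sketch of this public API (the histogram name is illustrative, not from this repository):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Hypothetical histogram observing seconds from request start to first response byte.
	ttfb := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "example_http_first_byte_seconds",
	})
	prometheus.MustRegister(ttfb)

	trace := &promhttp.InstrumentTrace{
		GotFirstResponseByte: func(t float64) { ttfb.Observe(t) },
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	if _, err := client.Get("https://example.com"); err != nil {
		log.Fatal(err)
	}
}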
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go (generated, vendored, 6 lines)
@@ -0,0 +1,6 @@
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
linters:
  enable:
  - staticcheck
  - govet
  disable-all: true
@@ -1 +1,2 @@
- * Tobias Schmidt <tobidt@gmail.com>
+ * Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
+ * Paul Gier <pgier@redhat.com> @pgier
@@ -0,0 +1,272 @@
# Copyright 2018 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# A common Makefile that includes rules to be reused in different prometheus projects.
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!

# Example usage :
# Create the main Makefile in the root project directory.
# include Makefile.common
# customTarget:
# @echo ">> Running customTarget"
#

# Ensure GOBIN is not set during build so that promu is installed to the correct path
unexport GOBIN

GO           ?= go
GOFMT        ?= $(GO)fmt
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
GOOPTS       ?=
GOHOSTOS     ?= $(shell $(GO) env GOHOSTOS)
GOHOSTARCH   ?= $(shell $(GO) env GOHOSTARCH)

GO_VERSION        ?= $(shell $(GO) version)
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')

GOVENDOR :=
GO111MODULE :=
ifeq (, $(PRE_GO_111))
	ifneq (,$(wildcard go.mod))
		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
		GO111MODULE := on

		ifneq (,$(wildcard vendor))
			# Always use the local vendor/ directory to satisfy the dependencies.
			GOOPTS := $(GOOPTS) -mod=vendor
		endif
	endif
else
	ifneq (,$(wildcard go.mod))
		ifneq (,$(wildcard vendor))
			$(warning This repository requires Go >= 1.11 because of Go modules)
			$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
		endif
	else
		# This repository isn't using Go modules (yet).
		GOVENDOR := $(FIRST_GOPATH)/bin/govendor
	endif
endif
PROMU := $(FIRST_GOPATH)/bin/promu
pkgs = ./...

ifeq (arm, $(GOHOSTARCH))
	GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
else
	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif

PROMU_VERSION ?= 0.3.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
	endif
endif

PREFIX           ?= $(shell pwd)
BIN_DIR          ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKER_REPO      ?= prom

DOCKER_ARCHS     ?= amd64

BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))

ifeq ($(GOHOSTARCH),amd64)
	ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
		# Only supported on amd64
		test-flags := -race
	endif
endif

# This rule is used to forward a target like "build" to "common-build". This
# allows a new "build" target to be defined in a Makefile which includes this
# one and override "common-build" without override warnings.
%: common-% ;

.PHONY: common-all
common-all: precheck style check_license lint unused build test

.PHONY: common-style
common-style:
	@echo ">> checking code style"
	@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
	if [ -n "$${fmtRes}" ]; then \
		echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
		echo "Please ensure you are using $$($(GO) version) for formatting code."; \
		exit 1; \
	fi

.PHONY: common-check_license
common-check_license:
	@echo ">> checking license header"
	@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
		awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
	done); \
	if [ -n "$${licRes}" ]; then \
		echo "license header checking failed:"; echo "$${licRes}"; \
		exit 1; \
	fi

.PHONY: common-deps
common-deps:
	@echo ">> getting dependencies"
ifdef GO111MODULE
	GO111MODULE=$(GO111MODULE) $(GO) mod download
else
	$(GO) get $(GOOPTS) -t ./...
endif

.PHONY: common-test-short
common-test-short:
	@echo ">> running short tests"
	GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)

.PHONY: common-test
common-test:
	@echo ">> running all tests"
	GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)

.PHONY: common-format
common-format:
	@echo ">> formatting code"
	GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)

.PHONY: common-vet
common-vet:
	@echo ">> vetting code"
	GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)

.PHONY: common-lint
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
	@echo ">> running golangci-lint"
ifdef GO111MODULE
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
	GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
	GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
else
	$(GOLANGCI_LINT) run $(pkgs)
endif
endif

# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint

.PHONY: common-unused
common-unused: $(GOVENDOR)
ifdef GOVENDOR
	@echo ">> running check for unused packages"
	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
else
ifdef GO111MODULE
	@echo ">> running check for unused/missing packages in go.mod"
	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
ifeq (,$(wildcard vendor))
	@git diff --exit-code -- go.sum go.mod
else
	@echo ">> running check for unused packages in vendor/"
	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
	@git diff --exit-code -- go.sum go.mod vendor/
endif
endif
endif

.PHONY: common-build
common-build: promu
	@echo ">> building binaries"
	GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)

.PHONY: common-tarball
common-tarball: promu
	@echo ">> building release tarball"
	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
		--build-arg ARCH="$*" \
		--build-arg OS="linux" \
		.

.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"

.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"

.PHONY: common-docker-manifest
common-docker-manifest:
	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"

.PHONY: promu
promu: $(PROMU)

$(PROMU):
	$(eval PROMU_TMP := $(shell mktemp -d))
	curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
	mkdir -p $(FIRST_GOPATH)/bin
	cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
	rm -r $(PROMU_TMP)

.PHONY: proto
proto:
	@echo ">> generating code from proto files"
	@./scripts/genproto.sh

ifdef GOLANGCI_LINT
$(GOLANGCI_LINT):
	mkdir -p $(FIRST_GOPATH)/bin
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif

ifdef GOVENDOR
.PHONY: $(GOVENDOR)
$(GOVENDOR):
	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
endif

.PHONY: precheck
precheck::

define PRECHECK_COMMAND_template =
precheck:: $(1)_precheck

PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
.PHONY: $(1)_precheck
$(1)_precheck:
	@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
		echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
		exit 1; \
	fi
endef
(File diff suppressed because it is too large.)
@@ -0,0 +1,3 @@
module github.com/prometheus/procfs

require golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
@@ -0,0 +1,2 @@
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -0,0 +1,52 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import (
	"fmt"
	"os"
	"path/filepath"
)

const (
	// DefaultProcMountPoint is the common mount point of the proc filesystem.
	DefaultProcMountPoint = "/proc"

	// DefaultSysMountPoint is the common mount point of the sys filesystem.
	DefaultSysMountPoint = "/sys"
)

// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
// interface to kernel data structures.
type FS string

// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (FS, error) {
	info, err := os.Stat(mountPoint)
	if err != nil {
		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
	}
	if !info.IsDir() {
		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
	}

	return FS(mountPoint), nil
}

// Path appends the given path elements to the filesystem path, adding separators
// as necessary.
func (fs FS) Path(p ...string) string {
	return filepath.Join(append([]string{string(fs)}, p...)...)
}
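A short usage sketch of the fs package added above, assuming it lives at the procfs-internal import path (and is therefore only importable from within that module); the paths are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs/internal/fs"
)

func main() {
	// NewFS fails if the mount point is missing or not a directory.
	proc, err := fs.NewFS(fs.DefaultProcMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// Path joins elements onto the mount point, e.g. "/proc/net/rpc/nfsd".
	fmt.Println(proc.Path("net", "rpc", "nfsd"))
}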
@@ -1,59 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
	"io/ioutil"
	"strconv"
	"strings"
)

// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
	us := make([]uint32, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 10, 32)
		if err != nil {
			return nil, err
		}

		us = append(us, uint32(u))
	}

	return us, nil
}

// ParseUint64s parses a slice of strings into a slice of uint64s.
func ParseUint64s(ss []string) ([]uint64, error) {
	us := make([]uint64, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		us = append(us, u)
	}

	return us, nil
}

// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
@@ -1,45 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package util

import (
	"bytes"
	"os"
	"syscall"
)

// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
func SysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
	// Go's ioutil.ReadFile implementation to poll forever.
	//
	// Since we either want to read data or bail immediately, do the simplest
	// possible read using syscall directly.
	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(b[:n])), nil
}
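The single bounded read above deliberately avoids the retry behaviour of ioutil.ReadFile on broken hwmon files. A usage sketch of the helper removed here (the sysfs path is illustrative, and the internal/util import path is an assumption):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs/internal/util"
)

func main() {
	// One direct syscall.Read of up to 128 bytes: either data or an immediate error (e.g. EAGAIN).
	v, err := util.SysReadFile("/sys/class/hwmon/hwmon0/temp1_input")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v)
}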
@@ -1,263 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package nfs implements parsing of /proc/net/rpc/nfsd.
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
package nfs

// ReplyCache models the "rc" line.
type ReplyCache struct {
	Hits    uint64
	Misses  uint64
	NoCache uint64
}

// FileHandles models the "fh" line.
type FileHandles struct {
	Stale        uint64
	TotalLookups uint64
	AnonLookups  uint64
	DirNoCache   uint64
	NoDirNoCache uint64
}

// InputOutput models the "io" line.
type InputOutput struct {
	Read  uint64
	Write uint64
}

// Threads models the "th" line.
type Threads struct {
	Threads uint64
	FullCnt uint64
}

// ReadAheadCache models the "ra" line.
type ReadAheadCache struct {
	CacheSize      uint64
	CacheHistogram []uint64
	NotFound       uint64
}

// Network models the "net" line.
type Network struct {
	NetCount   uint64
	UDPCount   uint64
	TCPCount   uint64
	TCPConnect uint64
}

// ClientRPC models the nfs "rpc" line.
type ClientRPC struct {
	RPCCount        uint64
	Retransmissions uint64
	AuthRefreshes   uint64
}

// ServerRPC models the nfsd "rpc" line.
type ServerRPC struct {
	RPCCount uint64
	BadCnt   uint64
	BadFmt   uint64
	BadAuth  uint64
	BadcInt  uint64
}

// V2Stats models the "proc2" line.
type V2Stats struct {
	Null     uint64
	GetAttr  uint64
	SetAttr  uint64
	Root     uint64
	Lookup   uint64
	ReadLink uint64
	Read     uint64
	WrCache  uint64
	Write    uint64
	Create   uint64
	Remove   uint64
	Rename   uint64
	Link     uint64
	SymLink  uint64
	MkDir    uint64
	RmDir    uint64
	ReadDir  uint64
	FsStat   uint64
}

// V3Stats models the "proc3" line.
type V3Stats struct {
	Null        uint64
	GetAttr     uint64
	SetAttr     uint64
	Lookup      uint64
	Access      uint64
	ReadLink    uint64
	Read        uint64
	Write       uint64
	Create      uint64
	MkDir       uint64
	SymLink     uint64
	MkNod       uint64
	Remove      uint64
	RmDir       uint64
	Rename      uint64
	Link        uint64
	ReadDir     uint64
	ReadDirPlus uint64
	FsStat      uint64
	FsInfo      uint64
	PathConf    uint64
	Commit      uint64
}

// ClientV4Stats models the nfs "proc4" line.
type ClientV4Stats struct {
	Null               uint64
	Read               uint64
	Write              uint64
	Commit             uint64
	Open               uint64
	OpenConfirm        uint64
	OpenNoattr         uint64
	OpenDowngrade      uint64
	Close              uint64
	Setattr            uint64
	FsInfo             uint64
	Renew              uint64
	SetClientID        uint64
	SetClientIDConfirm uint64
	Lock               uint64
	Lockt              uint64
	Locku              uint64
	Access             uint64
	Getattr            uint64
	Lookup             uint64
	LookupRoot         uint64
	Remove             uint64
	Rename             uint64
	Link               uint64
	Symlink            uint64
	Create             uint64
	Pathconf           uint64
	StatFs             uint64
	ReadLink           uint64
	ReadDir            uint64
	ServerCaps         uint64
	DelegReturn        uint64
	GetACL             uint64
	SetACL             uint64
	FsLocations        uint64
	ReleaseLockowner   uint64
	Secinfo            uint64
	FsidPresent        uint64
	ExchangeID         uint64
	CreateSession      uint64
	DestroySession     uint64
	Sequence           uint64
	GetLeaseTime       uint64
	ReclaimComplete    uint64
	LayoutGet          uint64
	GetDeviceInfo      uint64
	LayoutCommit       uint64
	LayoutReturn       uint64
	SecinfoNoName      uint64
	TestStateID        uint64
	FreeStateID        uint64
	GetDeviceList      uint64
	BindConnToSession  uint64
	DestroyClientID    uint64
	Seek               uint64
	Allocate           uint64
	DeAllocate         uint64
	LayoutStats        uint64
	Clone              uint64
}

// ServerV4Stats models the nfsd "proc4" line.
type ServerV4Stats struct {
	Null     uint64
	Compound uint64
}

// V4Ops models the "proc4ops" line: NFSv4 operations
// Variable list, see:
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
type V4Ops struct {
	//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
	Op0Unused    uint64
	Op1Unused    uint64
	Op2Future    uint64
	Access       uint64
	Close        uint64
	Commit       uint64
	Create       uint64
	DelegPurge   uint64
	DelegReturn  uint64
	GetAttr      uint64
	GetFH        uint64
	Link         uint64
	Lock         uint64
	Lockt        uint64
	Locku        uint64
	Lookup       uint64
	LookupRoot   uint64
	Nverify      uint64
	Open         uint64
	OpenAttr     uint64
	OpenConfirm  uint64
	OpenDgrd     uint64
	PutFH        uint64
	PutPubFH     uint64
	PutRootFH    uint64
	Read         uint64
	ReadDir      uint64
	ReadLink     uint64
	Remove       uint64
	Rename       uint64
	Renew        uint64
	RestoreFH    uint64
	SaveFH       uint64
	SecInfo      uint64
	SetAttr      uint64
	Verify       uint64
	Write        uint64
	RelLockOwner uint64
}

// ClientRPCStats models all stats from /proc/net/rpc/nfs.
type ClientRPCStats struct {
	Network       Network
	ClientRPC     ClientRPC
	V2Stats       V2Stats
	V3Stats       V3Stats
	ClientV4Stats ClientV4Stats
}

// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
type ServerRPCStats struct {
	ReplyCache     ReplyCache
	FileHandles    FileHandles
	InputOutput    InputOutput
	Threads        Threads
	ReadAheadCache ReadAheadCache
	Network        Network
	ServerRPC      ServerRPC
	V2Stats        V2Stats
	V3Stats        V3Stats
	ServerV4Stats  ServerV4Stats
	V4Ops          V4Ops
}
@@ -1,317 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
	"fmt"
)

func parseReplyCache(v []uint64) (ReplyCache, error) {
	if len(v) != 3 {
		return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
	}

	return ReplyCache{
		Hits:    v[0],
		Misses:  v[1],
		NoCache: v[2],
	}, nil
}

func parseFileHandles(v []uint64) (FileHandles, error) {
	if len(v) != 5 {
		return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
	}

	return FileHandles{
		Stale:        v[0],
		TotalLookups: v[1],
		AnonLookups:  v[2],
		DirNoCache:   v[3],
		NoDirNoCache: v[4],
	}, nil
}

func parseInputOutput(v []uint64) (InputOutput, error) {
	if len(v) != 2 {
		return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
	}

	return InputOutput{
		Read:  v[0],
		Write: v[1],
	}, nil
}

func parseThreads(v []uint64) (Threads, error) {
	if len(v) != 2 {
		return Threads{}, fmt.Errorf("invalid Threads line %q", v)
	}

	return Threads{
		Threads: v[0],
		FullCnt: v[1],
	}, nil
}

func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
	if len(v) != 12 {
		return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
	}

	return ReadAheadCache{
		CacheSize:      v[0],
		CacheHistogram: v[1:11],
		NotFound:       v[11],
	}, nil
}

func parseNetwork(v []uint64) (Network, error) {
	if len(v) != 4 {
		return Network{}, fmt.Errorf("invalid Network line %q", v)
	}

	return Network{
		NetCount:   v[0],
		UDPCount:   v[1],
		TCPCount:   v[2],
		TCPConnect: v[3],
	}, nil
}

func parseServerRPC(v []uint64) (ServerRPC, error) {
	if len(v) != 5 {
		return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
	}

	return ServerRPC{
		RPCCount: v[0],
		BadCnt:   v[1],
		BadFmt:   v[2],
		BadAuth:  v[3],
		BadcInt:  v[4],
	}, nil
}

func parseClientRPC(v []uint64) (ClientRPC, error) {
	if len(v) != 3 {
		return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
	}

	return ClientRPC{
		RPCCount:        v[0],
		Retransmissions: v[1],
		AuthRefreshes:   v[2],
	}, nil
}

func parseV2Stats(v []uint64) (V2Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values || values != 18 {
		return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
	}

	return V2Stats{
		Null:     v[1],
		GetAttr:  v[2],
		SetAttr:  v[3],
		Root:     v[4],
		Lookup:   v[5],
		ReadLink: v[6],
		Read:     v[7],
		WrCache:  v[8],
		Write:    v[9],
		Create:   v[10],
		Remove:   v[11],
		Rename:   v[12],
		Link:     v[13],
		SymLink:  v[14],
		MkDir:    v[15],
		RmDir:    v[16],
		ReadDir:  v[17],
		FsStat:   v[18],
	}, nil
}

func parseV3Stats(v []uint64) (V3Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values || values != 22 {
		return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
	}

	return V3Stats{
		Null:        v[1],
		GetAttr:     v[2],
		SetAttr:     v[3],
		Lookup:      v[4],
		Access:      v[5],
		ReadLink:    v[6],
		Read:        v[7],
		Write:       v[8],
		Create:      v[9],
		MkDir:       v[10],
		SymLink:     v[11],
		MkNod:       v[12],
		Remove:      v[13],
		RmDir:       v[14],
		Rename:      v[15],
		Link:        v[16],
		ReadDir:     v[17],
		ReadDirPlus: v[18],
		FsStat:      v[19],
		FsInfo:      v[20],
		PathConf:    v[21],
		Commit:      v[22],
	}, nil
}

func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values {
		return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
	}

	// This function currently supports mapping 59 NFS v4 client stats. Older
	// kernels may emit fewer stats, so we must detect this and pad out the
	// values to match the expected slice size.
	if values < 59 {
		newValues := make([]uint64, 60)
		copy(newValues, v)
		v = newValues
	}

	return ClientV4Stats{
		Null:               v[1],
		Read:               v[2],
		Write:              v[3],
		Commit:             v[4],
		Open:               v[5],
		OpenConfirm:        v[6],
		OpenNoattr:         v[7],
		OpenDowngrade:      v[8],
		Close:              v[9],
		Setattr:            v[10],
		FsInfo:             v[11],
		Renew:              v[12],
		SetClientID:        v[13],
		SetClientIDConfirm: v[14],
		Lock:               v[15],
		Lockt:              v[16],
		Locku:              v[17],
		Access:             v[18],
		Getattr:            v[19],
		Lookup:             v[20],
		LookupRoot:         v[21],
		Remove:             v[22],
		Rename:             v[23],
		Link:               v[24],
		Symlink:            v[25],
		Create:             v[26],
		Pathconf:           v[27],
		StatFs:             v[28],
		ReadLink:           v[29],
		ReadDir:            v[30],
		ServerCaps:         v[31],
		DelegReturn:        v[32],
		GetACL:             v[33],
		SetACL:             v[34],
		FsLocations:        v[35],
		ReleaseLockowner:   v[36],
		Secinfo:            v[37],
		FsidPresent:        v[38],
		ExchangeID:         v[39],
		CreateSession:      v[40],
		DestroySession:     v[41],
		Sequence:           v[42],
		GetLeaseTime:       v[43],
		ReclaimComplete:    v[44],
		LayoutGet:          v[45],
		GetDeviceInfo:      v[46],
		LayoutCommit:       v[47],
		LayoutReturn:       v[48],
		SecinfoNoName:      v[49],
		TestStateID:        v[50],
		FreeStateID:        v[51],
		GetDeviceList:      v[52],
		BindConnToSession:  v[53],
		DestroyClientID:    v[54],
		Seek:               v[55],
		Allocate:           v[56],
		DeAllocate:         v[57],
		LayoutStats:        v[58],
		Clone:              v[59],
	}, nil
}

func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
	values := int(v[0])
	if len(v[1:]) != values || values != 2 {
		return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
	}

	return ServerV4Stats{
		Null:     v[1],
		Compound: v[2],
	}, nil
}

func parseV4Ops(v []uint64) (V4Ops, error) {
	values := int(v[0])
	if len(v[1:]) != values || values < 39 {
		return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
	}

	stats := V4Ops{
		Op0Unused:    v[1],
		Op1Unused:    v[2],
		Op2Future:    v[3],
		Access:       v[4],
		Close:        v[5],
		Commit:       v[6],
		Create:       v[7],
		DelegPurge:   v[8],
		DelegReturn:  v[9],
		GetAttr:      v[10],
		GetFH:        v[11],
		Link:         v[12],
		Lock:         v[13],
		Lockt:        v[14],
		Locku:        v[15],
		Lookup:       v[16],
		LookupRoot:   v[17],
		Nverify:      v[18],
		Open:         v[19],
		OpenAttr:     v[20],
		OpenConfirm:  v[21],
		OpenDgrd:     v[22],
		PutFH:        v[23],
		PutPubFH:     v[24],
		PutRootFH:    v[25],
		Read:         v[26],
		ReadDir:      v[27],
		ReadLink:     v[28],
		Remove:       v[29],
		Rename:       v[30],
		Renew:        v[31],
		RestoreFH:    v[32],
		SaveFH:       v[33],
		SecInfo:      v[34],
		SetAttr:      v[35],
		Verify:       v[36],
		Write:        v[37],
		RelLockOwner: v[38],
	}

	return stats, nil
}
@ -1,67 +0,0 @@ |
|||||||
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
    "bufio"
    "fmt"
    "io"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
    stats := &ClientRPCStats{}

    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        line := scanner.Text()
        parts := strings.Fields(scanner.Text())
        // require at least <key> <value>
        if len(parts) < 2 {
            return nil, fmt.Errorf("invalid NFS metric line %q", line)
        }

        values, err := util.ParseUint64s(parts[1:])
        if err != nil {
            return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
        }

        switch metricLine := parts[0]; metricLine {
        case "net":
            stats.Network, err = parseNetwork(values)
        case "rpc":
            stats.ClientRPC, err = parseClientRPC(values)
        case "proc2":
            stats.V2Stats, err = parseV2Stats(values)
        case "proc3":
            stats.V3Stats, err = parseV3Stats(values)
        case "proc4":
            stats.ClientV4Stats, err = parseClientV4Stats(values)
        default:
            return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
        }
        if err != nil {
            return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
        }
    }

    if err := scanner.Err(); err != nil {
        return nil, fmt.Errorf("error scanning NFS file: %s", err)
    }

    return stats, nil
}
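A minimal usage sketch for the client parser above (not part of this change). It assumes the package import path github.com/prometheus/procfs/nfs used by this vendored copy, and that the sample "net" and "rpc" lines follow the standard /proc/net/rpc/nfs layout (four and three counters respectively); the numbers are illustrative.

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/procfs/nfs"
)

func main() {
    // A truncated /proc/net/rpc/nfs; real files also carry proc2/proc3/proc4 lines.
    input := "net 18628 0 18628 6\nrpc 4329785 0 4338291\n"
    stats, err := nfs.ParseClientRPCStats(strings.NewReader(input))
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    // Only the field names visible in the parser above are relied on here.
    fmt.Printf("network: %+v\nclient rpc: %+v\n", stats.Network, stats.ClientRPC)
}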
@@ -1,89 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
    "bufio"
    "fmt"
    "io"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
    stats := &ServerRPCStats{}

    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        line := scanner.Text()
        parts := strings.Fields(scanner.Text())
        // require at least <key> <value>
        if len(parts) < 2 {
            return nil, fmt.Errorf("invalid NFSd metric line %q", line)
        }
        label := parts[0]

        var values []uint64
        var err error
        if label == "th" {
            if len(parts) < 3 {
                return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
            }
            values, err = util.ParseUint64s(parts[1:3])
        } else {
            values, err = util.ParseUint64s(parts[1:])
        }
        if err != nil {
            return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
        }

        switch metricLine := parts[0]; metricLine {
        case "rc":
            stats.ReplyCache, err = parseReplyCache(values)
        case "fh":
            stats.FileHandles, err = parseFileHandles(values)
        case "io":
            stats.InputOutput, err = parseInputOutput(values)
        case "th":
            stats.Threads, err = parseThreads(values)
        case "ra":
            stats.ReadAheadCache, err = parseReadAheadCache(values)
        case "net":
            stats.Network, err = parseNetwork(values)
        case "rpc":
            stats.ServerRPC, err = parseServerRPC(values)
        case "proc2":
            stats.V2Stats, err = parseV2Stats(values)
        case "proc3":
            stats.V3Stats, err = parseV3Stats(values)
        case "proc4":
            stats.ServerV4Stats, err = parseServerV4Stats(values)
        case "proc4ops":
            stats.V4Ops, err = parseV4Ops(values)
        default:
            return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
        }
        if err != nil {
            return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
        }
    }

    if err := scanner.Err(); err != nil {
        return nil, fmt.Errorf("error scanning NFSd file: %s", err)
    }

    return stats, nil
}
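A short sketch of why the "th" line is special-cased above (not part of the vendored code): in /proc/net/rpc/nfsd the "th" line carries the thread count and a busy counter followed by ten fractional histogram columns, so only parts[1:3] can be parsed as integers. The import path and sample line are assumptions for illustration.

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/procfs/nfs"
)

func main() {
    // Only the integer prefix of the th line is consumed; the 0.000 columns are skipped.
    line := "th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000\n"
    stats, err := nfs.ParseServerRPCStats(strings.NewReader(line))
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Printf("threads: %+v\n", stats.Threads)
}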
@@ -0,0 +1,110 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

// The PSI / pressure interface is described at
//   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
// Each resource (cpu, io, memory, ...) is exposed as a single file.
// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
// Each line contains several averages (over n seconds) and a total in µs.
//
// Example io pressure file:
// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"
)

const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"

// PSILine is a single line of values as returned by /proc/pressure/*
// The Avg entries are averages over n seconds, as a percentage
// The Total line is in microseconds
type PSILine struct {
    Avg10  float64
    Avg60  float64
    Avg300 float64
    Total  uint64
}

// PSIStats represent pressure stall information from /proc/pressure/*
// Some indicates the share of time in which at least some tasks are stalled
// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
type PSIStats struct {
    Some *PSILine
    Full *PSILine
}

// NewPSIStatsForResource reads pressure stall information for the specified
// resource. At time of writing this can be either "cpu", "memory" or "io".
func NewPSIStatsForResource(resource string) (PSIStats, error) {
    fs, err := NewFS(DefaultMountPoint)
    if err != nil {
        return PSIStats{}, err
    }

    return fs.NewPSIStatsForResource(resource)
}

// NewPSIStatsForResource reads pressure stall information from /proc/pressure/<resource>
func (fs FS) NewPSIStatsForResource(resource string) (PSIStats, error) {
    file, err := os.Open(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
    if err != nil {
        return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
    }

    defer file.Close()
    return parsePSIStats(resource, file)
}

// parsePSIStats parses the specified file for pressure stall information
func parsePSIStats(resource string, file io.Reader) (PSIStats, error) {
    psiStats := PSIStats{}
    stats, err := ioutil.ReadAll(file)
    if err != nil {
        return psiStats, fmt.Errorf("psi_stats: unable to read data for %s", resource)
    }

    for _, l := range strings.Split(string(stats), "\n") {
        prefix := strings.Split(l, " ")[0]
        switch prefix {
        case "some":
            psi := PSILine{}
            _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
            if err != nil {
                return PSIStats{}, err
            }
            psiStats.Some = &psi
        case "full":
            psi := PSILine{}
            _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
            if err != nil {
                return PSIStats{}, err
            }
            psiStats.Full = &psi
        default:
            // If we encounter a line with an unknown prefix, ignore it and move on
            // Should new measurement types be added in the future we'll simply ignore them instead
            // of erroring on retrieval
            continue
        }
    }

    return psiStats, nil
}
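A deterministic in-package sketch of the PSI parser above (a hypothetical test, not part of this change). It feeds the example lines from the package comment to parsePSIStats, so no /proc/pressure files or PSI-enabled kernel are required.

package procfs

import (
    "strings"
    "testing"
)

// TestParsePSIStatsSketch checks that the documented example io pressure file
// round-trips through parsePSIStats.
func TestParsePSIStatsSketch(t *testing.T) {
    input := "some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362\n" +
        "full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134\n"
    stats, err := parsePSIStats("io", strings.NewReader(input))
    if err != nil {
        t.Fatal(err)
    }
    if stats.Some == nil || stats.Some.Total != 8537362 {
        t.Fatalf("unexpected some line: %+v", stats.Some)
    }
    if stats.Full == nil || stats.Full.Avg300 != 0.96 {
        t.Fatalf("unexpected full line: %+v", stats.Full)
    }
}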
@@ -1,330 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package xfs

import (
    "bufio"
    "fmt"
    "io"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// ParseStats parses a Stats from an input io.Reader, using the format
// found in /proc/fs/xfs/stat.
func ParseStats(r io.Reader) (*Stats, error) {
    const (
        // Fields parsed into stats structures.
        fieldExtentAlloc = "extent_alloc"
        fieldAbt         = "abt"
        fieldBlkMap      = "blk_map"
        fieldBmbt        = "bmbt"
        fieldDir         = "dir"
        fieldTrans       = "trans"
        fieldIg          = "ig"
        fieldLog         = "log"
        fieldRw          = "rw"
        fieldAttr        = "attr"
        fieldIcluster    = "icluster"
        fieldVnodes      = "vnodes"
        fieldBuf         = "buf"
        fieldXpc         = "xpc"

        // Unimplemented at this time due to lack of documentation.
        fieldPushAil = "push_ail"
        fieldXstrat  = "xstrat"
        fieldAbtb2   = "abtb2"
        fieldAbtc2   = "abtc2"
        fieldBmbt2   = "bmbt2"
        fieldIbt2    = "ibt2"
        fieldFibt2   = "fibt2"
        fieldQm      = "qm"
        fieldDebug   = "debug"
    )

    var xfss Stats

    s := bufio.NewScanner(r)
    for s.Scan() {
        // Expect at least a string label and a single integer value, ex:
        // - abt 0
        // - rw 1 2
        ss := strings.Fields(string(s.Bytes()))
        if len(ss) < 2 {
            continue
        }
        label := ss[0]

        // Extended precision counters are uint64 values.
        if label == fieldXpc {
            us, err := util.ParseUint64s(ss[1:])
            if err != nil {
                return nil, err
            }

            xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
            if err != nil {
                return nil, err
            }

            continue
        }

        // All other counters are uint32 values.
        us, err := util.ParseUint32s(ss[1:])
        if err != nil {
            return nil, err
        }

        switch label {
        case fieldExtentAlloc:
            xfss.ExtentAllocation, err = extentAllocationStats(us)
        case fieldAbt:
            xfss.AllocationBTree, err = btreeStats(us)
        case fieldBlkMap:
            xfss.BlockMapping, err = blockMappingStats(us)
        case fieldBmbt:
            xfss.BlockMapBTree, err = btreeStats(us)
        case fieldDir:
            xfss.DirectoryOperation, err = directoryOperationStats(us)
        case fieldTrans:
            xfss.Transaction, err = transactionStats(us)
        case fieldIg:
            xfss.InodeOperation, err = inodeOperationStats(us)
        case fieldLog:
            xfss.LogOperation, err = logOperationStats(us)
        case fieldRw:
            xfss.ReadWrite, err = readWriteStats(us)
        case fieldAttr:
            xfss.AttributeOperation, err = attributeOperationStats(us)
        case fieldIcluster:
            xfss.InodeClustering, err = inodeClusteringStats(us)
        case fieldVnodes:
            xfss.Vnode, err = vnodeStats(us)
        case fieldBuf:
            xfss.Buffer, err = bufferStats(us)
        }
        if err != nil {
            return nil, err
        }
    }

    return &xfss, s.Err()
}

// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
    if l := len(us); l != 4 {
        return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
    }

    return ExtentAllocationStats{
        ExtentsAllocated: us[0],
        BlocksAllocated:  us[1],
        ExtentsFreed:     us[2],
        BlocksFreed:      us[3],
    }, nil
}

// btreeStats builds a BTreeStats from a slice of uint32s.
func btreeStats(us []uint32) (BTreeStats, error) {
    if l := len(us); l != 4 {
        return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
    }

    return BTreeStats{
        Lookups:         us[0],
        Compares:        us[1],
        RecordsInserted: us[2],
        RecordsDeleted:  us[3],
    }, nil
}

// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
    if l := len(us); l != 7 {
        return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
    }

    return BlockMappingStats{
        Reads:                us[0],
        Writes:               us[1],
        Unmaps:               us[2],
        ExtentListInsertions: us[3],
        ExtentListDeletions:  us[4],
        ExtentListLookups:    us[5],
        ExtentListCompares:   us[6],
    }, nil
}

// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
    if l := len(us); l != 4 {
        return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
    }

    return DirectoryOperationStats{
        Lookups:  us[0],
        Creates:  us[1],
        Removes:  us[2],
        Getdents: us[3],
    }, nil
}

// TransactionStats builds a TransactionStats from a slice of uint32s.
func transactionStats(us []uint32) (TransactionStats, error) {
    if l := len(us); l != 3 {
        return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
    }

    return TransactionStats{
        Sync:  us[0],
        Async: us[1],
        Empty: us[2],
    }, nil
}

// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
    if l := len(us); l != 7 {
        return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
    }

    return InodeOperationStats{
        Attempts:        us[0],
        Found:           us[1],
        Recycle:         us[2],
        Missed:          us[3],
        Duplicate:       us[4],
        Reclaims:        us[5],
        AttributeChange: us[6],
    }, nil
}

// LogOperationStats builds a LogOperationStats from a slice of uint32s.
func logOperationStats(us []uint32) (LogOperationStats, error) {
    if l := len(us); l != 5 {
        return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
    }

    return LogOperationStats{
        Writes:            us[0],
        Blocks:            us[1],
        NoInternalBuffers: us[2],
        Force:             us[3],
        ForceSleep:        us[4],
    }, nil
}

// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
func readWriteStats(us []uint32) (ReadWriteStats, error) {
    if l := len(us); l != 2 {
        return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
    }

    return ReadWriteStats{
        Read:  us[0],
        Write: us[1],
    }, nil
}

// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
    if l := len(us); l != 4 {
        return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
    }

    return AttributeOperationStats{
        Get:    us[0],
        Set:    us[1],
        Remove: us[2],
        List:   us[3],
    }, nil
}

// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
    if l := len(us); l != 3 {
        return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
    }

    return InodeClusteringStats{
        Iflush:     us[0],
        Flush:      us[1],
        FlushInode: us[2],
    }, nil
}

// VnodeStats builds a VnodeStats from a slice of uint32s.
func vnodeStats(us []uint32) (VnodeStats, error) {
    // The attribute "Free" appears to not be available on older XFS
    // stats versions. Therefore, 7 or 8 elements may appear in
    // this slice.
    l := len(us)
    if l != 7 && l != 8 {
        return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
    }

    s := VnodeStats{
        Active:   us[0],
        Allocate: us[1],
        Get:      us[2],
        Hold:     us[3],
        Release:  us[4],
        Reclaim:  us[5],
        Remove:   us[6],
    }

    // Skip adding free, unless it is present. The zero value will
    // be used in place of an actual count.
    if l == 7 {
        return s, nil
    }

    s.Free = us[7]
    return s, nil
}

// BufferStats builds a BufferStats from a slice of uint32s.
func bufferStats(us []uint32) (BufferStats, error) {
    if l := len(us); l != 9 {
        return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
    }

    return BufferStats{
        Get:             us[0],
        Create:          us[1],
        GetLocked:       us[2],
        GetLockedWaited: us[3],
        BusyLocked:      us[4],
        MissLocked:      us[5],
        PageRetries:     us[6],
        PageFound:       us[7],
        GetRead:         us[8],
    }, nil
}

// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
    if l := len(us); l != 3 {
        return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
    }

    return ExtendedPrecisionStats{
        FlushBytes: us[0],
        WriteBytes: us[1],
        ReadBytes:  us[2],
    }, nil
}
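A minimal usage sketch for ParseStats (not part of this change). The sample counters are invented, and the import path is assumed to be the github.com/prometheus/procfs/xfs path of this vendored copy; the value counts match the checks in the helpers above, and unknown labels are simply skipped.

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/procfs/xfs"
)

func main() {
    // A truncated /proc/fs/xfs/stat style input: 4 extent_alloc values,
    // 2 rw values, 3 extended-precision (xpc) values.
    input := "extent_alloc 1 2 3 4\nrw 10 20\nxpc 100 200 300\n"
    stats, err := xfs.ParseStats(strings.NewReader(input))
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Println(stats.ReadWrite.Read, stats.ReadWrite.Write) // 10 20
    fmt.Println(stats.ExtendedPrecision.WriteBytes)          // 200
}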
@@ -1,163 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package xfs provides access to statistics exposed by the XFS filesystem.
package xfs

// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
    // The name of the filesystem used to source these statistics.
    // If empty, this indicates aggregated statistics for all XFS
    // filesystems on the host.
    Name string

    ExtentAllocation   ExtentAllocationStats
    AllocationBTree    BTreeStats
    BlockMapping       BlockMappingStats
    BlockMapBTree      BTreeStats
    DirectoryOperation DirectoryOperationStats
    Transaction        TransactionStats
    InodeOperation     InodeOperationStats
    LogOperation       LogOperationStats
    ReadWrite          ReadWriteStats
    AttributeOperation AttributeOperationStats
    InodeClustering    InodeClusteringStats
    Vnode              VnodeStats
    Buffer             BufferStats
    ExtendedPrecision  ExtendedPrecisionStats
}

// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
    ExtentsAllocated uint32
    BlocksAllocated  uint32
    ExtentsFreed     uint32
    BlocksFreed      uint32
}

// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
    Lookups         uint32
    Compares        uint32
    RecordsInserted uint32
    RecordsDeleted  uint32
}

// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
    Reads                uint32
    Writes               uint32
    Unmaps               uint32
    ExtentListInsertions uint32
    ExtentListDeletions  uint32
    ExtentListLookups    uint32
    ExtentListCompares   uint32
}

// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
    Lookups  uint32
    Creates  uint32
    Removes  uint32
    Getdents uint32
}

// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
    Sync  uint32
    Async uint32
    Empty uint32
}

// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
    Attempts        uint32
    Found           uint32
    Recycle         uint32
    Missed          uint32
    Duplicate       uint32
    Reclaims        uint32
    AttributeChange uint32
}

// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
    Writes            uint32
    Blocks            uint32
    NoInternalBuffers uint32
    Force             uint32
    ForceSleep        uint32
}

// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
    Read  uint32
    Write uint32
}

// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
    Get    uint32
    Set    uint32
    Remove uint32
    List   uint32
}

// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
    Iflush     uint32
    Flush      uint32
    FlushInode uint32
}

// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
    Active   uint32
    Allocate uint32
    Get      uint32
    Hold     uint32
    Release  uint32
    Reclaim  uint32
    Remove   uint32
    Free     uint32
}

// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
    Get             uint32
    Create          uint32
    GetLocked       uint32
    GetLockedWaited uint32
    BusyLocked      uint32
    MissLocked      uint32
    PageRetries     uint32
    PageFound       uint32
    GetRead         uint32
}

// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
type ExtendedPrecisionStats struct {
    FlushBytes uint64
    WriteBytes uint64
    ReadBytes  uint64
}
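A small sketch of the Name convention documented on the Stats struct above (the describe helper is hypothetical and not part of the vendored package): an empty Name marks host-wide aggregated statistics, a non-empty Name refers to a single filesystem.

package main

import (
    "fmt"

    "github.com/prometheus/procfs/xfs"
)

// describe distinguishes aggregated stats from per-filesystem stats by Name.
func describe(s xfs.Stats) string {
    if s.Name == "" {
        return "aggregated XFS stats for all filesystems"
    }
    return "XFS stats for " + s.Name
}

func main() {
    fmt.Println(describe(xfs.Stats{}))
    fmt.Println(describe(xfs.Stats{Name: "sda1"}))
}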