diff options
| author | Kevin Lyda <kevin@ie.suberic.net> | 2017-02-12 14:38:12 +0000 | 
|---|---|---|
| committer | Niall Sheridan <nsheridan@gmail.com> | 2017-02-12 14:38:12 +0000 | 
| commit | ed8bc523fd0d1a66acf3fa449c453508035efdfc (patch) | |
| tree | c05849546e1dd9d771dcc4c4ff52056d249a95fb /vendor/github.com/prometheus | |
| parent | 9c344a0a95c44ef9cebade7b8a65ac160d9eb900 (diff) | |
Initial pass at prometheus support. (#56)
Diffstat (limited to 'vendor/github.com/prometheus')
62 files changed, 11289 insertions, 0 deletions
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ +                                 Apache License +                           Version 2.0, January 2004 +                        http://www.apache.org/licenses/ + +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +   1. Definitions. + +      "License" shall mean the terms and conditions for use, reproduction, +      and distribution as defined by Sections 1 through 9 of this document. + +      "Licensor" shall mean the copyright owner or entity authorized by +      the copyright owner that is granting the License. + +      "Legal Entity" shall mean the union of the acting entity and all +      other entities that control, are controlled by, or are under common +      control with that entity. For the purposes of this definition, +      "control" means (i) the power, direct or indirect, to cause the +      direction or management of such entity, whether by contract or +      otherwise, or (ii) ownership of fifty percent (50%) or more of the +      outstanding shares, or (iii) beneficial ownership of such entity. + +      "You" (or "Your") shall mean an individual or Legal Entity +      exercising permissions granted by this License. + +      "Source" form shall mean the preferred form for making modifications, +      including but not limited to software source code, documentation +      source, and configuration files. + +      "Object" form shall mean any form resulting from mechanical +      transformation or translation of a Source form, including but +      not limited to compiled object code, generated documentation, +      and conversions to other media types. 
+ +      "Work" shall mean the work of authorship, whether in Source or +      Object form, made available under the License, as indicated by a +      copyright notice that is included in or attached to the work +      (an example is provided in the Appendix below). + +      "Derivative Works" shall mean any work, whether in Source or Object +      form, that is based on (or derived from) the Work and for which the +      editorial revisions, annotations, elaborations, or other modifications +      represent, as a whole, an original work of authorship. For the purposes +      of this License, Derivative Works shall not include works that remain +      separable from, or merely link (or bind by name) to the interfaces of, +      the Work and Derivative Works thereof. + +      "Contribution" shall mean any work of authorship, including +      the original version of the Work and any modifications or additions +      to that Work or Derivative Works thereof, that is intentionally +      submitted to Licensor for inclusion in the Work by the copyright owner +      or by an individual or Legal Entity authorized to submit on behalf of +      the copyright owner. For the purposes of this definition, "submitted" +      means any form of electronic, verbal, or written communication sent +      to the Licensor or its representatives, including but not limited to +      communication on electronic mailing lists, source code control systems, +      and issue tracking systems that are managed by, or on behalf of, the +      Licensor for the purpose of discussing and improving the Work, but +      excluding communication that is conspicuously marked or otherwise +      designated in writing by the copyright owner as "Not a Contribution." + +      "Contributor" shall mean Licensor and any individual or Legal Entity +      on behalf of whom a Contribution has been received by Licensor and +      subsequently incorporated within the Work. + +   2. Grant of Copyright License. 
Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      copyright license to reproduce, prepare Derivative Works of, +      publicly display, publicly perform, sublicense, and distribute the +      Work and such Derivative Works in Source or Object form. + +   3. Grant of Patent License. Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      (except as stated in this section) patent license to make, have made, +      use, offer to sell, sell, import, and otherwise transfer the Work, +      where such license applies only to those patent claims licensable +      by such Contributor that are necessarily infringed by their +      Contribution(s) alone or by combination of their Contribution(s) +      with the Work to which such Contribution(s) was submitted. If You +      institute patent litigation against any entity (including a +      cross-claim or counterclaim in a lawsuit) alleging that the Work +      or a Contribution incorporated within the Work constitutes direct +      or contributory patent infringement, then any patent licenses +      granted to You under this License for that Work shall terminate +      as of the date such litigation is filed. + +   4. Redistribution. 
You may reproduce and distribute copies of the +      Work or Derivative Works thereof in any medium, with or without +      modifications, and in Source or Object form, provided that You +      meet the following conditions: + +      (a) You must give any other recipients of the Work or +          Derivative Works a copy of this License; and + +      (b) You must cause any modified files to carry prominent notices +          stating that You changed the files; and + +      (c) You must retain, in the Source form of any Derivative Works +          that You distribute, all copyright, patent, trademark, and +          attribution notices from the Source form of the Work, +          excluding those notices that do not pertain to any part of +          the Derivative Works; and + +      (d) If the Work includes a "NOTICE" text file as part of its +          distribution, then any Derivative Works that You distribute must +          include a readable copy of the attribution notices contained +          within such NOTICE file, excluding those notices that do not +          pertain to any part of the Derivative Works, in at least one +          of the following places: within a NOTICE text file distributed +          as part of the Derivative Works; within the Source form or +          documentation, if provided along with the Derivative Works; or, +          within a display generated by the Derivative Works, if and +          wherever such third-party notices normally appear. The contents +          of the NOTICE file are for informational purposes only and +          do not modify the License. You may add Your own attribution +          notices within Derivative Works that You distribute, alongside +          or as an addendum to the NOTICE text from the Work, provided +          that such additional attribution notices cannot be construed +          as modifying the License. 
+ +      You may add Your own copyright statement to Your modifications and +      may provide additional or different license terms and conditions +      for use, reproduction, or distribution of Your modifications, or +      for any such Derivative Works as a whole, provided Your use, +      reproduction, and distribution of the Work otherwise complies with +      the conditions stated in this License. + +   5. Submission of Contributions. Unless You explicitly state otherwise, +      any Contribution intentionally submitted for inclusion in the Work +      by You to the Licensor shall be under the terms and conditions of +      this License, without any additional terms or conditions. +      Notwithstanding the above, nothing herein shall supersede or modify +      the terms of any separate license agreement you may have executed +      with Licensor regarding such Contributions. + +   6. Trademarks. This License does not grant permission to use the trade +      names, trademarks, service marks, or product names of the Licensor, +      except as required for reasonable and customary use in describing the +      origin of the Work and reproducing the content of the NOTICE file. + +   7. Disclaimer of Warranty. Unless required by applicable law or +      agreed to in writing, Licensor provides the Work (and each +      Contributor provides its Contributions) on an "AS IS" BASIS, +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +      implied, including, without limitation, any warranties or conditions +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +      PARTICULAR PURPOSE. You are solely responsible for determining the +      appropriateness of using or redistributing the Work and assume any +      risks associated with Your exercise of permissions under this License. + +   8. Limitation of Liability. 
In no event and under no legal theory, +      whether in tort (including negligence), contract, or otherwise, +      unless required by applicable law (such as deliberate and grossly +      negligent acts) or agreed to in writing, shall any Contributor be +      liable to You for damages, including any direct, indirect, special, +      incidental, or consequential damages of any character arising as a +      result of this License or out of the use or inability to use the +      Work (including but not limited to damages for loss of goodwill, +      work stoppage, computer failure or malfunction, or any and all +      other commercial damages or losses), even if such Contributor +      has been advised of the possibility of such damages. + +   9. Accepting Warranty or Additional Liability. While redistributing +      the Work or Derivative Works thereof, You may choose to offer, +      and charge a fee for, acceptance of support, warranty, indemnity, +      or other liability obligations and/or rights consistent with this +      License. However, in accepting such obligations, You may act only +      on Your own behalf and on Your sole responsibility, not on behalf +      of any other Contributor, and only if You agree to indemnify, +      defend, and hold each Contributor harmless for any liability +      incurred by, or claims asserted against, such Contributor by reason +      of your accepting any such warranty or additional liability. + +   END OF TERMS AND CONDITIONS + +   APPENDIX: How to apply the Apache License to your work. + +      To apply the Apache License to your work, attach the following +      boilerplate notice, with the fields enclosed by brackets "[]" +      replaced with your own identifying information. (Don't include +      the brackets!)  The text should be enclosed in the appropriate +      comment syntax for the file format. 
We also recommend that a +      file or class name and description of purpose be included on the +      same "printed page" as the copyright notice for easier +      identification within third-party archives. + +   Copyright [yyyy] [name of copyright owner] + +   Licensed under the Apache License, Version 2.0 (the "License"); +   you may not use this file except in compliance with the License. +   You may obtain a copy of the License at + +       http://www.apache.org/licenses/LICENSE-2.0 + +   Unless required by applicable law or agreed to in writing, software +   distributed under the License is distributed on an "AS IS" BASIS, +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +   See the License for the specific language governing permissions and +   limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 0000000..dd878a3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. 
Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 0000000..44986bf --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 0000000..623d3d8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. 
collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { +	// Describe sends the super-set of all possible descriptors of metrics +	// collected by this Collector to the provided channel and returns once +	// the last descriptor has been sent. The sent descriptors fulfill the +	// consistency and uniqueness requirements described in the Desc +	// documentation. (It is valid if one and the same Collector sends +	// duplicate descriptors. Those duplicates are simply ignored. However, +	// two different Collectors must not send duplicate descriptors.) This +	// method idempotently sends the same descriptors throughout the +	// lifetime of the Collector. If a Collector encounters an error while +	// executing this method, it must send an invalid descriptor (created +	// with NewInvalidDesc) to signal the error to the registry. +	Describe(chan<- *Desc) +	// Collect is called by the Prometheus registry when collecting +	// metrics. The implementation sends each collected metric via the +	// provided channel and returns once the last metric has been sent. The +	// descriptor of each sent metric is one of those returned by +	// Describe. Returned metrics that share the same descriptor must differ +	// in their variable label values. This method may be called +	// concurrently and must therefore be implemented in a concurrency safe +	// way. Blocking occurs at the expense of total performance of rendering +	// all registered metrics. Ideally, Collector implementations support +	// concurrent readers. +	Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. 
+type selfCollector struct { +	self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { +	c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { +	ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { +	ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 0000000..72d5256 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,164 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( +	"errors" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { +	Metric +	Collector + +	// Inc increments the counter by 1. 
Use Add to increment it by arbitrary +	// non-negative values. +	Inc() +	// Add adds the given value to the counter. It panics if the value is < +	// 0. +	Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { +	desc := NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		nil, +		opts.ConstLabels, +	) +	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} +	result.init(result) // Init self-collection. +	return result +} + +type counter struct { +	value +} + +func (c *counter) Add(v float64) { +	if v < 0 { +		panic(errors.New("counter cannot decrease in value")) +	} +	c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. +type CounterVec struct { +	*MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { +	desc := NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		labelNames, +		opts.ConstLabels, +	) +	return &CounterVec{ +		MetricVec: newMetricVec(desc, func(lvs ...string) Metric { +			result := &counter{value: value{ +				desc:       desc, +				valType:    CounterValue, +				labelPairs: makeLabelPairs(desc, lvs), +			}} +			result.init(result) // Init self-collection. 
+			return result +		}), +	} +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { +	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +	if metric != nil { +		return metric.(Counter), err +	} +	return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { +	metric, err := m.MetricVec.GetMetricWith(labels) +	if metric != nil { +		return metric.(Counter), err +	} +	return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +//     myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { +	return m.MetricVec.WithLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { +	return m.MetricVec.With(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { +	Metric +	Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. 
Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { +	return newValueFunc(NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		nil, +		opts.ConstLabels, +	), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 0000000..1835b16 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,200 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( +	"errors" +	"fmt" +	"sort" +	"strings" + +	"github.com/golang/protobuf/proto" +	"github.com/prometheus/common/model" + +	dto "github.com/prometheus/client_model/go" +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. 
This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { +	// fqName has been built from Namespace, Subsystem, and Name. +	fqName string +	// help provides some helpful information about this metric. +	help string +	// constLabelPairs contains precalculated DTO label pairs based on +	// the constant labels. +	constLabelPairs []*dto.LabelPair +	// VariableLabels contains names of labels for which the metric +	// maintains variable values. +	variableLabels []string +	// id is a hash of the values of the ConstLabels and fqName. This +	// must be unique among all registered descriptors and can therefore be +	// used as an identifier of the descriptor. +	id uint64 +	// dimHash is a hash of the label names (preset and variable) and the +	// Help string. 
Each Desc with the same fqName must have the same +	// dimHash. +	dimHash uint64 +	// err is an error that occurred during construction. It is reported on +	// registration time. +	err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { +	d := &Desc{ +		fqName:         fqName, +		help:           help, +		variableLabels: variableLabels, +	} +	if help == "" { +		d.err = errors.New("empty help string") +		return d +	} +	if !model.IsValidMetricName(model.LabelValue(fqName)) { +		d.err = fmt.Errorf("%q is not a valid metric name", fqName) +		return d +	} +	// labelValues contains the label values of const labels (in order of +	// their sorted label names) plus the fqName (at position 0). +	labelValues := make([]string, 1, len(constLabels)+1) +	labelValues[0] = fqName +	labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) +	labelNameSet := map[string]struct{}{} +	// First add only the const label names and sort them... +	for labelName := range constLabels { +		if !checkLabelName(labelName) { +			d.err = fmt.Errorf("%q is not a valid label name", labelName) +			return d +		} +		labelNames = append(labelNames, labelName) +		labelNameSet[labelName] = struct{}{} +	} +	sort.Strings(labelNames) +	// ... so that we can now add const label values in the order of their names. 
+	for _, labelName := range labelNames { +		labelValues = append(labelValues, constLabels[labelName]) +	} +	// Now add the variable label names, but prefix them with something that +	// cannot be in a regular label name. That prevents matching the label +	// dimension with a different mix between preset and variable labels. +	for _, labelName := range variableLabels { +		if !checkLabelName(labelName) { +			d.err = fmt.Errorf("%q is not a valid label name", labelName) +			return d +		} +		labelNames = append(labelNames, "$"+labelName) +		labelNameSet[labelName] = struct{}{} +	} +	if len(labelNames) != len(labelNameSet) { +		d.err = errors.New("duplicate label names") +		return d +	} +	vh := hashNew() +	for _, val := range labelValues { +		vh = hashAdd(vh, val) +		vh = hashAddByte(vh, separatorByte) +	} +	d.id = vh +	// Sort labelNames so that order doesn't matter for the hash. +	sort.Strings(labelNames) +	// Now hash together (in this order) the help string and the sorted +	// label names. +	lh := hashNew() +	lh = hashAdd(lh, help) +	lh = hashAddByte(lh, separatorByte) +	for _, labelName := range labelNames { +		lh = hashAdd(lh, labelName) +		lh = hashAddByte(lh, separatorByte) +	} +	d.dimHash = lh + +	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) +	for n, v := range constLabels { +		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ +			Name:  proto.String(n), +			Value: proto.String(v), +		}) +	} +	sort.Sort(LabelPairSorter(d.constLabelPairs)) +	return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. 
+func NewInvalidDesc(err error) *Desc { +	return &Desc{ +		err: err, +	} +} + +func (d *Desc) String() string { +	lpStrings := make([]string, 0, len(d.constLabelPairs)) +	for _, lp := range d.constLabelPairs { +		lpStrings = append( +			lpStrings, +			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), +		) +	} +	return fmt.Sprintf( +		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", +		d.fqName, +		d.help, +		strings.Join(lpStrings, ","), +		d.variableLabels, +	) +} + +func checkLabelName(l string) bool { +	return model.LabelName(l).IsValid() && +		!strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 0000000..618c4de --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,181 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides metrics primitives to instrument code for +// monitoring. It also offers a registry for metrics. Sub-packages allow to +// expose the registered metrics via HTTP (package promhttp) or push them to a +// Pushgateway (package push). +// +// All exported functions and methods are safe to be used concurrently unless +//specified otherwise. 
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+//    package main
+//
+//    import (
+//    	"log"
+//    	"net/http"
+//
+//    	"github.com/prometheus/client_golang/prometheus"
+//    	"github.com/prometheus/client_golang/prometheus/promhttp"
+//    )
+//
+//    var (
+//    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+//    		Name: "cpu_temperature_celsius",
+//    		Help: "Current temperature of the CPU.",
+//    	})
+//    	hdFailures = prometheus.NewCounterVec(
+//    		prometheus.CounterOpts{
+//    			Name: "hd_errors_total",
+//    			Help: "Number of hard-disk errors.",
+//    		},
+//    		[]string{"device"},
+//    	)
+//    )
+//
+//    func init() {
+//    	// Metrics have to be registered to be exposed:
+//    	prometheus.MustRegister(cpuTemp)
+//    	prometheus.MustRegister(hdFailures)
+//    }
+//
+//    func main() {
+//    	cpuTemp.Set(65.3)
+//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//    	// The Handler function provides a default handler to expose metrics
+//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
+//    	http.Handle("/metrics", promhttp.Handler())
+//    	log.Fatal(http.ListenAndServe(":8080", nil))
+//    }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. 
A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, +// HistogramOpts, or UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. 
You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might +// cause. As suggested by the name, MustRegister panics if an error occurs. With +// the Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data +// model. Inconsistencies are ideally detected at registration time, not at +// collect time. The former will usually be detected at start-up time of a +// program, while the latter will only happen at scrape time, possibly not even +// on the first scrape if the inconsistency only becomes relevant later. 
That is +// the main reason why a Collector and a Metric have to describe themselves to +// the registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in +// the same way on a custom registry as the global functions Register and +// Unregister on the default registry. +// +// There are a number of uses for custom registries: You can use registries +// with special properties, see NewPedanticRegistry. You can avoid global state, +// as it is imposed by the DefaultRegistry. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegistry comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp +// sub-package. (The top-level functions in the prometheus package are +// deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added. Sending metrics to +// Graphite would be an example that will soon be implemented. 
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 0000000..18a99d5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. 
The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { +	return &expvarCollector{ +		exports: exports, +	} +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { +	for _, desc := range e.exports { +		ch <- desc +	} +} + +// Collect implements Collector. 
+func (e *expvarCollector) Collect(ch chan<- Metric) { +	for name, desc := range e.exports { +		var m Metric +		expVar := expvar.Get(name) +		if expVar == nil { +			continue +		} +		var v interface{} +		labels := make([]string, len(desc.variableLabels)) +		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { +			ch <- NewInvalidMetric(desc, err) +			continue +		} +		var processValue func(v interface{}, i int) +		processValue = func(v interface{}, i int) { +			if i >= len(labels) { +				copiedLabels := append(make([]string, 0, len(labels)), labels...) +				switch v := v.(type) { +				case float64: +					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) +				case bool: +					if v { +						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) +					} else { +						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) +					} +				default: +					return +				} +				ch <- m +				return +			} +			vm, ok := v.(map[string]interface{}) +			if !ok { +				return +			} +			for lv, val := range vm { +				labels[i] = lv +				processValue(val, i+1) +			} +		} +		processValue(v, 0) +	} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 0000000..e3b67df --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,29 @@ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( +	offset64 = 14695981039346656037 +	prime64  = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { +	return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { +	for i := 0; i < len(s); i++ { +		h ^= uint64(s[i]) +		h *= prime64 +	} +	return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
+func hashAddByte(h uint64, b byte) uint64 { +	h ^= uint64(b) +	h *= prime64 +	return h +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 0000000..9ab5a3d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,145 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { +	Metric +	Collector + +	// Set sets the Gauge to an arbitrary value. +	Set(float64) +	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary +	// values. +	Inc() +	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary +	// values. +	Dec() +	// Add adds the given value to the Gauge. (The value can be negative, +	// resulting in a decrease of the Gauge.) +	Add(float64) +	// Sub subtracts the given value from the Gauge. (The value can be +	// negative, resulting in an increase of the Gauge.) +	Sub(float64) + +	// SetToCurrentTime sets the Gauge to the current Unix time in seconds. 
+	SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +func NewGauge(opts GaugeOpts) Gauge { +	return newValue(NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		nil, +		opts.ConstLabels, +	), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { +	*MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { +	desc := NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		labelNames, +		opts.ConstLabels, +	) +	return &GaugeVec{ +		MetricVec: newMetricVec(desc, func(lvs ...string) Metric { +			return newValue(desc, GaugeValue, 0, lvs...) +		}), +	} +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { +	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +	if metric != nil { +		return metric.(Gauge), err +	} +	return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. 
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { +	metric, err := m.MetricVec.GetMetricWith(labels) +	if metric != nil { +		return metric.(Gauge), err +	} +	return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +//     myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { +	return m.MetricVec.WithLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { +	return m.MetricVec.With(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { +	Metric +	Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { +	return newValueFunc(NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		nil, +		opts.ConstLabels, +	), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 0000000..6dea674 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,263 @@ +package prometheus + +import ( +	"fmt" +	"runtime" +	"runtime/debug" +	"time" +) + +type goCollector struct { +	goroutines Gauge +	gcDesc     *Desc + +	// metrics to describe and collect +	metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. +func NewGoCollector() Collector { +	return &goCollector{ +		goroutines: NewGauge(GaugeOpts{ +			Namespace: "go", +			Name:      "goroutines", +			Help:      "Number of goroutines that currently exist.", +		}), +		gcDesc: NewDesc( +			"go_gc_duration_seconds", +			"A summary of the GC invocation durations.", +			nil, nil), +		metrics: memStatsMetrics{ +			{ +				desc: NewDesc( +					memstatNamespace("alloc_bytes"), +					"Number of bytes allocated and still in use.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("alloc_bytes_total"), +					"Total number of bytes allocated, even if freed.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, +				valType: CounterValue, +			}, { +				desc: NewDesc( +					memstatNamespace("sys_bytes"), +					"Number of bytes obtained from system.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					
memstatNamespace("lookups_total"), +					"Total number of pointer lookups.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, +				valType: CounterValue, +			}, { +				desc: NewDesc( +					memstatNamespace("mallocs_total"), +					"Total number of mallocs.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, +				valType: CounterValue, +			}, { +				desc: NewDesc( +					memstatNamespace("frees_total"), +					"Total number of frees.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, +				valType: CounterValue, +			}, { +				desc: NewDesc( +					memstatNamespace("heap_alloc_bytes"), +					"Number of heap bytes allocated and still in use.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("heap_sys_bytes"), +					"Number of heap bytes obtained from system.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("heap_idle_bytes"), +					"Number of heap bytes waiting to be used.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("heap_inuse_bytes"), +					"Number of heap bytes that are in use.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("heap_released_bytes"), +					"Number of heap bytes released to OS.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					
memstatNamespace("heap_objects"), +					"Number of allocated objects.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("stack_inuse_bytes"), +					"Number of bytes in use by the stack allocator.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("stack_sys_bytes"), +					"Number of bytes obtained from system for stack allocator.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("mspan_inuse_bytes"), +					"Number of bytes in use by mspan structures.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("mspan_sys_bytes"), +					"Number of bytes used for mspan structures obtained from system.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("mcache_inuse_bytes"), +					"Number of bytes in use by mcache structures.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("mcache_sys_bytes"), +					"Number of bytes used for mcache structures obtained from system.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("buck_hash_sys_bytes"), +					"Number of bytes used by the profiling bucket hash table.", +					nil, nil, +				), +				
eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("gc_sys_bytes"), +					"Number of bytes used for garbage collection system metadata.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("other_sys_bytes"), +					"Number of bytes used for other system allocations.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("next_gc_bytes"), +					"Number of heap bytes when next garbage collection will take place.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, +				valType: GaugeValue, +			}, { +				desc: NewDesc( +					memstatNamespace("last_gc_time_seconds"), +					"Number of seconds since 1970 of last garbage collection.", +					nil, nil, +				), +				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, +				valType: GaugeValue, +			}, +		}, +	} +} + +func memstatNamespace(s string) string { +	return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { +	ch <- c.goroutines.Desc() +	ch <- c.gcDesc + +	for _, i := range c.metrics { +		ch <- i.desc +	} +} + +// Collect returns the current state of all metrics of the collector. 
+func (c *goCollector) Collect(ch chan<- Metric) { +	c.goroutines.Set(float64(runtime.NumGoroutine())) +	ch <- c.goroutines + +	var stats debug.GCStats +	stats.PauseQuantiles = make([]time.Duration, 5) +	debug.ReadGCStats(&stats) + +	quantiles := make(map[float64]float64) +	for idx, pq := range stats.PauseQuantiles[1:] { +		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() +	} +	quantiles[0.0] = stats.PauseQuantiles[0].Seconds() +	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + +	ms := &runtime.MemStats{} +	runtime.ReadMemStats(ms) +	for _, i := range c.metrics { +		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) +	} +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { +	desc    *Desc +	eval    func(*runtime.MemStats) float64 +	valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 0000000..9719e8f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,444 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( +	"fmt" +	"math" +	"sort" +	"sync/atomic" + +	"github.com/golang/protobuf/proto" + +	dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { +	Metric +	Collector + +	// Observe adds a single observation to the histogram. +	Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( +	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + +	errBucketLabelNotAllowed = fmt.Errorf( +		"%q is not allowed as label name in histograms", bucketLabel, +	) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. 
The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { +	if count < 1 { +		panic("LinearBuckets needs a positive count") +	} +	buckets := make([]float64, count) +	for i := range buckets { +		buckets[i] = start +		start += width +	} +	return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { +	if count < 1 { +		panic("ExponentialBuckets needs a positive count") +	} +	if start <= 0 { +		panic("ExponentialBuckets needs a positive start value") +	} +	if factor <= 1 { +		panic("ExponentialBuckets needs a factor greater than 1") +	} +	buckets := make([]float64, count) +	for i := range buckets { +		buckets[i] = start +		start *= factor +	} +	return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { +	// Namespace, Subsystem, and Name are components of the fully-qualified +	// name of the Histogram (created by joining these components with +	// "_"). Only Name is mandatory, the others merely help structuring the +	// name. Note that the fully-qualified name of the Histogram must be a +	// valid Prometheus metric name. 
+	Namespace string +	Subsystem string +	Name      string + +	// Help provides information about this Histogram. Mandatory! +	// +	// Metrics with the same fully-qualified name must have the same Help +	// string. +	Help string + +	// ConstLabels are used to attach fixed labels to this +	// Histogram. Histograms with the same fully-qualified name must have the +	// same label names in their ConstLabels. +	// +	// Note that in most cases, labels have a value that varies during the +	// lifetime of a process. Those labels are usually managed with a +	// HistogramVec. ConstLabels serve only special purposes. One is for the +	// special case where the value of a label does not change during the +	// lifetime of a process, e.g. if the revision of the running binary is +	// put into a label. Another, more advanced purpose is if more than one +	// Collector needs to collect Histograms with the same fully-qualified +	// name. In that case, those Summaries must differ in the values of +	// their ConstLabels. See the Collector examples. +	// +	// If the value of a label never changes (not even between binaries), +	// that label most likely should not be a label at all (but part of the +	// metric name). +	ConstLabels Labels + +	// Buckets defines the buckets into which observations are counted. Each +	// element in the slice is the upper inclusive bound of a bucket. The +	// values must be sorted in strictly increasing order. There is no need +	// to add a highest bucket with +Inf bound, it will be added +	// implicitly. The default value is DefBuckets. +	Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. 
+func NewHistogram(opts HistogramOpts) Histogram { +	return newHistogram( +		NewDesc( +			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +			opts.Help, +			nil, +			opts.ConstLabels, +		), +		opts, +	) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { +	if len(desc.variableLabels) != len(labelValues) { +		panic(errInconsistentCardinality) +	} + +	for _, n := range desc.variableLabels { +		if n == bucketLabel { +			panic(errBucketLabelNotAllowed) +		} +	} +	for _, lp := range desc.constLabelPairs { +		if lp.GetName() == bucketLabel { +			panic(errBucketLabelNotAllowed) +		} +	} + +	if len(opts.Buckets) == 0 { +		opts.Buckets = DefBuckets +	} + +	h := &histogram{ +		desc:        desc, +		upperBounds: opts.Buckets, +		labelPairs:  makeLabelPairs(desc, labelValues), +	} +	for i, upperBound := range h.upperBounds { +		if i < len(h.upperBounds)-1 { +			if upperBound >= h.upperBounds[i+1] { +				panic(fmt.Errorf( +					"histogram buckets must be in increasing order: %f >= %f", +					upperBound, h.upperBounds[i+1], +				)) +			} +		} else { +			if math.IsInf(upperBound, +1) { +				// The +Inf bucket is implicit. Remove it here. +				h.upperBounds = h.upperBounds[:i] +			} +		} +	} +	// Finally we know the final length of h.upperBounds and can make counts. +	h.counts = make([]uint64, len(h.upperBounds)) + +	h.init(h) // Init self-collection. +	return h +} + +type histogram struct { +	// sumBits contains the bits of the float64 representing the sum of all +	// observations. sumBits and count have to go first in the struct to +	// guarantee alignment for atomic operations. +	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG +	sumBits uint64 +	count   uint64 + +	selfCollector +	// Note that there is no mutex required. 
+ +	desc *Desc + +	upperBounds []float64 +	counts      []uint64 + +	labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { +	return h.desc +} + +func (h *histogram) Observe(v float64) { +	// TODO(beorn7): For small numbers of buckets (<30), a linear search is +	// slightly faster than the binary search. If we really care, we could +	// switch from one search strategy to the other depending on the number +	// of buckets. +	// +	// Microbenchmarks (BenchmarkHistogramNoLabels): +	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op +	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op +	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op +	i := sort.SearchFloat64s(h.upperBounds, v) +	if i < len(h.counts) { +		atomic.AddUint64(&h.counts[i], 1) +	} +	atomic.AddUint64(&h.count, 1) +	for { +		oldBits := atomic.LoadUint64(&h.sumBits) +		newBits := math.Float64bits(math.Float64frombits(oldBits) + v) +		if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { +			break +		} +	} +} + +func (h *histogram) Write(out *dto.Metric) error { +	his := &dto.Histogram{} +	buckets := make([]*dto.Bucket, len(h.upperBounds)) + +	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) +	his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) +	var count uint64 +	for i, upperBound := range h.upperBounds { +		count += atomic.LoadUint64(&h.counts[i]) +		buckets[i] = &dto.Bucket{ +			CumulativeCount: proto.Uint64(count), +			UpperBound:      proto.Float64(upperBound), +		} +	} +	his.Bucket = buckets +	out.Histogram = his +	out.Label = h.labelPairs +	return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. 
+type HistogramVec struct { +	*MetricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { +	desc := NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		labelNames, +		opts.ConstLabels, +	) +	return &HistogramVec{ +		MetricVec: newMetricVec(desc, func(lvs ...string) Metric { +			return newHistogram(desc, opts, lvs...) +		}), +	} +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Histogram and not a +// Metric so that no type conversion is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { +	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +	if metric != nil { +		return metric.(Histogram), err +	} +	return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Histogram and not a Metric so that no +// type conversion is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { +	metric, err := m.MetricVec.GetMetricWith(labels) +	if metric != nil { +		return metric.(Histogram), err +	} +	return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +//     myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { +	return m.MetricVec.WithLabelValues(lvs...).(Histogram) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Histogram { +	return m.MetricVec.With(labels).(Histogram) +} + +type constHistogram struct { +	desc       *Desc +	count      uint64 +	sum        float64 +	buckets    map[float64]uint64 +	labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { +	return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { +	his := &dto.Histogram{} +	buckets := make([]*dto.Bucket, 0, len(h.buckets)) + +	his.SampleCount = proto.Uint64(h.count) +	his.SampleSum = proto.Float64(h.sum) + +	for upperBound, count := range h.buckets { +		buckets = append(buckets, &dto.Bucket{ +			CumulativeCount: proto.Uint64(count), +			UpperBound:      proto.Float64(upperBound), +		}) +	} + +	if len(buckets) > 0 { +		sort.Sort(buckSort(buckets)) +	} +	his.Bucket = buckets + +	out.Histogram = his +	out.Label = h.labelPairs + +	return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
+func NewConstHistogram( +	desc *Desc, +	count uint64, +	sum float64, +	buckets map[float64]uint64, +	labelValues ...string, +) (Metric, error) { +	if len(desc.variableLabels) != len(labelValues) { +		return nil, errInconsistentCardinality +	} +	return &constHistogram{ +		desc:       desc, +		count:      count, +		sum:        sum, +		buckets:    buckets, +		labelPairs: makeLabelPairs(desc, labelValues), +	}, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( +	desc *Desc, +	count uint64, +	sum float64, +	buckets map[float64]uint64, +	labelValues ...string, +) Metric { +	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) +	if err != nil { +		panic(err) +	} +	return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { +	return len(s) +} + +func (s buckSort) Swap(i, j int) { +	s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { +	return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 0000000..d74fb48 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,526 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( +	"bufio" +	"bytes" +	"compress/gzip" +	"fmt" +	"io" +	"net" +	"net/http" +	"strconv" +	"strings" +	"sync" +	"time" + +	"github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( +	contentTypeHeader     = "Content-Type" +	contentLengthHeader   = "Content-Length" +	contentEncodingHeader = "Content-Encoding" +	acceptEncodingHeader  = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { +	buf := bufPool.Get() +	if buf == nil { +		return &bytes.Buffer{} +	} +	return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { +	buf.Reset() +	bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead +// (which is not instrumented). +func Handler() http.Handler { +	return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. 
+func UninstrumentedHandler() http.Handler { +	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { +		mfs, err := DefaultGatherer.Gather() +		if err != nil { +			http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) +			return +		} + +		contentType := expfmt.Negotiate(req.Header) +		buf := getBuf() +		defer giveBuf(buf) +		writer, encoding := decorateWriter(req, buf) +		enc := expfmt.NewEncoder(writer, contentType) +		var lastErr error +		for _, mf := range mfs { +			if err := enc.Encode(mf); err != nil { +				lastErr = err +				http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) +				return +			} +		} +		if closer, ok := writer.(io.Closer); ok { +			closer.Close() +		} +		if lastErr != nil && buf.Len() == 0 { +			http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) +			return +		} +		header := w.Header() +		header.Set(contentTypeHeader, string(contentType)) +		header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) +		if encoding != "" { +			header.Set(contentEncodingHeader, encoding) +		} +		w.Write(buf.Bytes()) +	}) +} + +// decorateWriter wraps a writer to handle gzip compression if requested.  It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { +	header := request.Header.Get(acceptEncodingHeader) +	parts := strings.Split(header, ",") +	for _, part := range parts { +		part := strings.TrimSpace(part) +		if part == "gzip" || strings.HasPrefix(part, "gzip;") { +			return gzip.NewWriter(writer), "gzip" +		} +	} +	return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { +	Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { +	return n() +} + +var now nower = nowFunc(func() time.Time { +	return time.Now() +}) + +func nowSeries(t ...time.Time) nower { +	return nowFunc(func() time.Time { +		defer func() { +			t = t[1:] +		}() + +		return t[0] +	}) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. 
+// +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +// +// Upcoming versions of this package will provide ways of instrumenting HTTP +// handlers that are more flexible and have fewer issues. Please prefer direct +// instrumentation in the meantime. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { +	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { +	return InstrumentHandlerFuncWithOpts( +		SummaryOpts{ +			Subsystem:   "http", +			ConstLabels: Labels{"handler": handlerName}, +			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, +		}, +		handlerFunc, +	) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). 
+// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +//     prometheus.InstrumentHandlerWithOpts( +//         prometheus.SummaryOpts{ +//              Subsystem:   "http", +//              ConstLabels: prometheus.Labels{"handler": handlerName}, +//         }, +//         handler, +//     ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { +	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { +	reqCnt := NewCounterVec( +		CounterOpts{ +			Namespace:   opts.Namespace, +			Subsystem:   opts.Subsystem, +			Name:        "requests_total", +			Help:        "Total number of HTTP requests made.", +			ConstLabels: opts.ConstLabels, +		}, +		instLabels, +	) +	if err := Register(reqCnt); err != nil { +		if are, ok := err.(AlreadyRegisteredError); ok { +			reqCnt = are.ExistingCollector.(*CounterVec) +		} else { +			panic(err) +		} +	} + +	opts.Name = "request_duration_microseconds" +	opts.Help = "The HTTP request latencies in microseconds." 
+	reqDur := NewSummary(opts) +	if err := Register(reqDur); err != nil { +		if are, ok := err.(AlreadyRegisteredError); ok { +			reqDur = are.ExistingCollector.(Summary) +		} else { +			panic(err) +		} +	} + +	opts.Name = "request_size_bytes" +	opts.Help = "The HTTP request sizes in bytes." +	reqSz := NewSummary(opts) +	if err := Register(reqSz); err != nil { +		if are, ok := err.(AlreadyRegisteredError); ok { +			reqSz = are.ExistingCollector.(Summary) +		} else { +			panic(err) +		} +	} + +	opts.Name = "response_size_bytes" +	opts.Help = "The HTTP response sizes in bytes." +	resSz := NewSummary(opts) +	if err := Register(resSz); err != nil { +		if are, ok := err.(AlreadyRegisteredError); ok { +			resSz = are.ExistingCollector.(Summary) +		} else { +			panic(err) +		} +	} + +	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +		now := time.Now() + +		delegate := &responseWriterDelegator{ResponseWriter: w} +		out := computeApproximateRequestSize(r) + +		_, cn := w.(http.CloseNotifier) +		_, fl := w.(http.Flusher) +		_, hj := w.(http.Hijacker) +		_, rf := w.(io.ReaderFrom) +		var rw http.ResponseWriter +		if cn && fl && hj && rf { +			rw = &fancyResponseWriterDelegator{delegate} +		} else { +			rw = delegate +		} +		handlerFunc(rw, r) + +		elapsed := float64(time.Since(now)) / float64(time.Microsecond) + +		method := sanitizeMethod(r.Method) +		code := sanitizeCode(delegate.status) +		reqCnt.WithLabelValues(method, code).Inc() +		reqDur.Observe(elapsed) +		resSz.Observe(float64(delegate.written)) +		reqSz.Observe(float64(<-out)) +	}) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { +	// Get URL length in current go routine for avoiding a race condition. +	// HandlerFunc that runs in parallel may modify the URL. 
+	s := 0 +	if r.URL != nil { +		s += len(r.URL.String()) +	} + +	out := make(chan int, 1) + +	go func() { +		s += len(r.Method) +		s += len(r.Proto) +		for name, values := range r.Header { +			s += len(name) +			for _, value := range values { +				s += len(value) +			} +		} +		s += len(r.Host) + +		// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + +		if r.ContentLength != -1 { +			s += int(r.ContentLength) +		} +		out <- s +		close(out) +	}() + +	return out +} + +type responseWriterDelegator struct { +	http.ResponseWriter + +	handler, method string +	status          int +	written         int64 +	wroteHeader     bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { +	r.status = code +	r.wroteHeader = true +	r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { +	if !r.wroteHeader { +		r.WriteHeader(http.StatusOK) +	} +	n, err := r.ResponseWriter.Write(b) +	r.written += int64(n) +	return n, err +} + +type fancyResponseWriterDelegator struct { +	*responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { +	return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { +	f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { +	return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { +	if !f.wroteHeader { +		f.WriteHeader(http.StatusOK) +	} +	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) +	f.written += n +	return n, err +} + +func sanitizeMethod(m string) string { +	switch m { +	case "GET", "get": +		return "get" +	case "PUT", "put": +		return "put" +	case "HEAD", "head": +		return "head" +	case "POST", "post": +		return "post" +	case "DELETE", "delete": +		return "delete" +	case "CONNECT", "connect": +		return "connect" +	case 
"OPTIONS", "options": +		return "options" +	case "NOTIFY", "notify": +		return "notify" +	default: +		return strings.ToLower(m) +	} +} + +func sanitizeCode(s int) string { +	switch s { +	case 100: +		return "100" +	case 101: +		return "101" + +	case 200: +		return "200" +	case 201: +		return "201" +	case 202: +		return "202" +	case 203: +		return "203" +	case 204: +		return "204" +	case 205: +		return "205" +	case 206: +		return "206" + +	case 300: +		return "300" +	case 301: +		return "301" +	case 302: +		return "302" +	case 304: +		return "304" +	case 305: +		return "305" +	case 307: +		return "307" + +	case 400: +		return "400" +	case 401: +		return "401" +	case 402: +		return "402" +	case 403: +		return "403" +	case 404: +		return "404" +	case 405: +		return "405" +	case 406: +		return "406" +	case 407: +		return "407" +	case 408: +		return "408" +	case 409: +		return "409" +	case 410: +		return "410" +	case 411: +		return "411" +	case 412: +		return "412" +	case 413: +		return "413" +	case 414: +		return "414" +	case 415: +		return "415" +	case 416: +		return "416" +	case 417: +		return "417" +	case 418: +		return "418" + +	case 500: +		return "500" +	case 501: +		return "501" +	case 502: +		return "502" +	case 503: +		return "503" +	case 504: +		return "504" +	case 505: +		return "505" + +	case 428: +		return "428" +	case 429: +		return "429" +	case 431: +		return "431" +	case 511: +		return "511" + +	default: +		return strconv.Itoa(s) +	} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 0000000..d4063d9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( +	"strings" + +	dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { +	// Desc returns the descriptor for the Metric. This method idempotently +	// returns the same descriptor throughout the lifetime of the +	// Metric. The returned descriptor is immutable by contract. A Metric +	// unable to describe itself must return an invalid descriptor (created +	// with NewInvalidDesc). +	Desc() *Desc +	// Write encodes the Metric into a "Metric" Protocol Buffer data +	// transmission object. +	// +	// Metric implementations must observe concurrency safety as reads of +	// this metric may occur at any time, and any blocking occurs at the +	// expense of total performance of rendering all registered +	// metrics. Ideally, Metric implementations should support concurrent +	// readers. +	// +	// While populating dto.Metric, it is the responsibility of the +	// implementation to ensure validity of the Metric protobuf (like valid +	// UTF-8 strings or syntactically valid metric and label names). It is +	// recommended to sort labels lexicographically. (Implementers may find +	// LabelPairSorter useful for that.) Callers of Write should still make +	// sure of sorting if they depend on it. 
+	Write(*dto.Metric) error +	// TODO(beorn7): The original rationale of passing in a pre-allocated +	// dto.Metric protobuf to save allocations has disappeared. The +	// signature of this method should be changed to "Write() (*dto.Metric, +	// error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { +	// Namespace, Subsystem, and Name are components of the fully-qualified +	// name of the Metric (created by joining these components with +	// "_"). Only Name is mandatory, the others merely help structuring the +	// name. Note that the fully-qualified name of the metric must be a +	// valid Prometheus metric name. +	Namespace string +	Subsystem string +	Name      string + +	// Help provides information about this metric. Mandatory! +	// +	// Metrics with the same fully-qualified name must have the same Help +	// string. +	Help string + +	// ConstLabels are used to attach fixed labels to this metric. Metrics +	// with the same fully-qualified name must have the same label names in +	// their ConstLabels. +	// +	// Note that in most cases, labels have a value that varies during the +	// lifetime of a process. Those labels are usually managed with a metric +	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels +	// serve only special purposes. One is for the special case where the +	// value of a label does not change during the lifetime of a process, +	// e.g. if the revision of the running binary is put into a +	// label. Another, more advanced purpose is if more than one Collector +	// needs to collect Metrics with the same fully-qualified name. 
In that +	// case, those Metrics must differ in the values of their +	// ConstLabels. See the Collector examples. +	// +	// If the value of a label never changes (not even between binaries), +	// that label most likely should not be a label at all (but part of the +	// metric name). +	ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { +	if name == "" { +		return "" +	} +	switch { +	case namespace != "" && subsystem != "": +		return strings.Join([]string{namespace, subsystem, name}, "_") +	case namespace != "": +		return strings.Join([]string{namespace, name}, "_") +	case subsystem != "": +		return strings.Join([]string{subsystem, name}, "_") +	} +	return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. 
+type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { +	return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { +	s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { +	return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { +	return len(s) +} + +func (s hashSorter) Swap(i, j int) { +	s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { +	return s[i] < s[j] +} + +type invalidMetric struct { +	desc *Desc +	err  error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { +	return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 0000000..94b2553 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { +	pid             int +	collectFn       func(chan<- Metric) +	pidFn           func() (int, error) +	cpuTotal        *Desc +	openFDs, maxFDs *Desc +	vsize, rss      *Desc +	startTime       *Desc +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) Collector { +	return NewProcessCollectorPIDFn( +		func() (int, error) { return pid, nil }, +		namespace, +	) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. 
+func NewProcessCollectorPIDFn( +	pidFn func() (int, error), +	namespace string, +) Collector { +	ns := "" +	if len(namespace) > 0 { +		ns = namespace + "_" +	} + +	c := processCollector{ +		pidFn:     pidFn, +		collectFn: func(chan<- Metric) {}, + +		cpuTotal: NewDesc( +			ns+"process_cpu_seconds_total", +			"Total user and system CPU time spent in seconds.", +			nil, nil, +		), +		openFDs: NewDesc( +			ns+"process_open_fds", +			"Number of open file descriptors.", +			nil, nil, +		), +		maxFDs: NewDesc( +			ns+"process_max_fds", +			"Maximum number of open file descriptors.", +			nil, nil, +		), +		vsize: NewDesc( +			ns+"process_virtual_memory_bytes", +			"Virtual memory size in bytes.", +			nil, nil, +		), +		rss: NewDesc( +			ns+"process_resident_memory_bytes", +			"Resident memory size in bytes.", +			nil, nil, +		), +		startTime: NewDesc( +			ns+"process_start_time_seconds", +			"Start time of the process since unix epoch in seconds.", +			nil, nil, +		), +	} + +	// Set up process metric collection if supported by the runtime. +	if _, err := procfs.NewStat(); err == nil { +		c.collectFn = c.processCollect +	} + +	return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { +	ch <- c.cpuTotal +	ch <- c.openFDs +	ch <- c.maxFDs +	ch <- c.vsize +	ch <- c.rss +	ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { +	c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. 
+func (c *processCollector) processCollect(ch chan<- Metric) { +	pid, err := c.pidFn() +	if err != nil { +		return +	} + +	p, err := procfs.NewProc(pid) +	if err != nil { +		return +	} + +	if stat, err := p.NewStat(); err == nil { +		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) +		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) +		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) +		if startTime, err := stat.StartTime(); err == nil { +			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) +		} +	} + +	if fds, err := p.FileDescriptorsLen(); err == nil { +		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) +	} + +	if limits, err := p.NewLimits(); err == nil { +		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) +	} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 0000000..b6dd5a2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,201 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +// Package promhttp contains functions to create http.Handler instances to +// expose Prometheus metrics via HTTP. In later versions of this package, it +// will also contain tooling to instrument instances of http.Handler and +// http.RoundTripper. +// +// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor, +// you can create a handler for a custom registry or anything that implements +// the Gatherer interface. It also allows to create handlers that act +// differently on errors or allow to log errors. +package promhttp + +import ( +	"bytes" +	"compress/gzip" +	"fmt" +	"io" +	"net/http" +	"strings" +	"sync" + +	"github.com/prometheus/common/expfmt" + +	"github.com/prometheus/client_golang/prometheus" +) + +const ( +	contentTypeHeader     = "Content-Type" +	contentLengthHeader   = "Content-Length" +	contentEncodingHeader = "Content-Encoding" +	acceptEncodingHeader  = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { +	buf := bufPool.Get() +	if buf == nil { +		return &bytes.Buffer{} +	} +	return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { +	buf.Reset() +	bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The +// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP +// error, no error logging, and compression if requested by the client. +// +// If you want to create a Handler for the DefaultGatherer with different +// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and +// your desired HandlerOpts. +func Handler() http.Handler { +	return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) +} + +// HandlerFor returns an http.Handler for the provided Gatherer. The behavior +// of the Handler is defined by the provided HandlerOpts. 
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			case HTTPErrorOnError:
+				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		buf := getBuf()
+		defer giveBuf(buf)
+		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+		enc := expfmt.NewEncoder(writer, contentType)
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				if opts.ErrorLog != nil {
+					opts.ErrorLog.Println("error encoding metric family:", err)
+				}
+				switch opts.ErrorHandling {
+				case PanicOnError:
+					panic(err)
+				case ContinueOnError:
+					// Handled later.
+				case HTTPErrorOnError:
+					http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			}
+		}
+		if closer, ok := writer.(io.Closer); ok {
+			closer.Close()
+		}
+		if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+			return
+		}
+		header := w.Header()
+		header.Set(contentTypeHeader, string(contentType))
+		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+		if encoding != "" {
+			header.Set(contentEncodingHeader, encoding)
+		}
+		w.Write(buf.Bytes())
+		// TODO(beorn7): Consider streaming serving of metrics.
+	}) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( +	// Serve an HTTP status code 500 upon the first error +	// encountered. Report the error message in the body. +	HTTPErrorOnError HandlerErrorHandling = iota +	// Ignore errors and try to serve as many metrics as possible.  However, +	// if no metrics can be served, serve an HTTP status code 500 and the +	// last error message in the body. Only use this in deliberate "best +	// effort" metrics collection scenarios. It is recommended to at least +	// log errors (by providing an ErrorLog in HandlerOpts) to not mask +	// errors completely. +	ContinueOnError +	// Panic upon the first error encountered (useful for "crash only" apps). +	PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { +	Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { +	// ErrorLog specifies an optional logger for errors collecting and +	// serving metrics. If nil, errors are not logged at all. +	ErrorLog Logger +	// ErrorHandling defines how errors are handled. Note that errors are +	// logged regardless of the configured ErrorHandling provided ErrorLog +	// is not nil. +	ErrorHandling HandlerErrorHandling +	// If DisableCompression is true, the handler will never compress the +	// response, even if requested by the client. +	DisableCompression bool +} + +// decorateWriter wraps a writer to handle gzip compression if requested.  
It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { +	if compressionDisabled { +		return writer, "" +	} +	header := request.Header.Get(acceptEncodingHeader) +	parts := strings.Split(header, ",") +	for _, part := range parts { +		part := strings.TrimSpace(part) +		if part == "gzip" || strings.HasPrefix(part, "gzip;") { +			return gzip.NewWriter(writer), "gzip" +		} +	} +	return writer, "" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 0000000..78d5f19 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,755 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( +	"bytes" +	"errors" +	"fmt" +	"os" +	"sort" +	"sync" + +	"github.com/golang/protobuf/proto" + +	dto "github.com/prometheus/client_model/go" +) + +const ( +	// Capacity for the channel to collect metrics and descriptors. +	capMetricChan = 1000 +	capDescChan   = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. 
Initially, both variables point to the same Registry, which +// has a process collector (see NewProcessCollector) and a Go collector (see +// NewGoCollector) already registered. This approach to keep default instances +// as global state mirrors the approach of other packages in the Go standard +// library. Note that there are caveats. Change the variables with caution and +// only if you understand the consequences. Users who want to avoid global state +// altogether should not use the convenience function and act on custom +// instances instead. +var ( +	defaultRegistry              = NewRegistry() +	DefaultRegisterer Registerer = defaultRegistry +	DefaultGatherer   Gatherer   = defaultRegistry +) + +func init() { +	MustRegister(NewProcessCollector(os.Getpid(), "")) +	MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { +	return &Registry{ +		collectorsByID:  map[uint64]Collector{}, +		descIDs:         map[uint64]struct{}{}, +		dimHashesByName: map[string]uint64{}, +	} +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { +	r := NewRegistry() +	r.pedanticChecksEnabled = true +	return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. 
Users of custom registries should use +// Registerer as type for registration purposes (rather then the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { +	// Register registers a new Collector to be included in metrics +	// collection. It returns an error if the descriptors provided by the +	// Collector are invalid or if they — in combination with descriptors of +	// already registered Collectors — do not fulfill the consistency and +	// uniqueness criteria described in the documentation of metric.Desc. +	// +	// If the provided Collector is equal to a Collector already registered +	// (which includes the case of re-registering the same Collector), the +	// returned error is an instance of AlreadyRegisteredError, which +	// contains the previously registered Collector. +	// +	// It is in general not safe to register the same Collector multiple +	// times concurrently. +	Register(Collector) error +	// MustRegister works like Register but registers any number of +	// Collectors and panics upon the first registration that causes an +	// error. +	MustRegister(...Collector) +	// Unregister unregisters the Collector that equals the Collector passed +	// in as an argument.  (Two Collectors are considered equal if their +	// Describe method yields the same set of descriptors.) The function +	// returns whether a Collector was unregistered. +	// +	// Note that even after unregistering, it will not be possible to +	// register a new Collector that is inconsistent with the unregistered +	// Collector, e.g. a Collector collecting metrics with the same name but +	// a different help string. The rationale here is that the same registry +	// instance must only collect consistent metrics throughout its +	// lifetime. 
+	Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { +	// Gather calls the Collect method of the registered Collectors and then +	// gathers the collected metrics into a lexicographically sorted slice +	// of MetricFamily protobufs. Even if an error occurs, Gather attempts +	// to gather as many metrics as possible. Hence, if a non-nil error is +	// returned, the returned MetricFamily slice could be nil (in case of a +	// fatal error that prevented any meaningful metric collection) or +	// contain a number of MetricFamily protobufs, some of which might be +	// incomplete, and some might be missing altogether. The returned error +	// (which might be a MultiError) explains the details. In scenarios +	// where complete collection is critical, the returned MetricFamily +	// protobufs should be disregarded if the returned error is non-nil. +	Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { +	return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { +	DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. 
+func Unregister(c Collector) bool { +	return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { +	return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. +type AlreadyRegisteredError struct { +	ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { +	return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error + +func (errs MultiError) Error() string { +	if len(errs) == 0 { +		return "" +	} +	buf := &bytes.Buffer{} +	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) +	for _, err := range errs { +		fmt.Fprintf(buf, "\n* %s", err) +	} +	return buf.String() +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. 
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID.  (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists { +			newDescIDs[desc.id] = struct{}{} +			collectorID += desc.id +		} + +		// Are all the label names and the help string consistent with +		// previous descriptors of the same name? +		// First check existing descriptors... +		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { +			if dimHash != desc.dimHash { +				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) +			} +		} else { +			// ...then check the new descriptors already seen. +			if dimHash, exists := newDimHashesByName[desc.fqName]; exists { +				if dimHash != desc.dimHash { +					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) +				} +			} else { +				newDimHashesByName[desc.fqName] = desc.dimHash +			} +		} +	} +	// Did anything happen at all? +	if len(newDescIDs) == 0 { +		return errors.New("collector has no descriptors") +	} +	if existing, exists := r.collectorsByID[collectorID]; exists { +		return AlreadyRegisteredError{ +			ExistingCollector: existing, +			NewCollector:      c, +		} +	} +	// If the collectorID is new, but at least one of the descs existed +	// before, we are in trouble. +	if duplicateDescErr != nil { +		return duplicateDescErr +	} + +	// Only after all tests have passed, actually register. +	r.collectorsByID[collectorID] = c +	for hash := range newDescIDs { +		r.descIDs[hash] = struct{}{} +	} +	for name, dimHash := range newDimHashesByName { +		r.dimHashesByName[name] = dimHash +	} +	return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { +	var ( +		descChan    = make(chan *Desc, capDescChan) +		descIDs     = map[uint64]struct{}{} +		collectorID uint64 // Just a sum of the desc IDs. 
+	) +	go func() { +		c.Describe(descChan) +		close(descChan) +	}() +	for desc := range descChan { +		if _, exists := descIDs[desc.id]; !exists { +			collectorID += desc.id +			descIDs[desc.id] = struct{}{} +		} +	} + +	r.mtx.RLock() +	if _, exists := r.collectorsByID[collectorID]; !exists { +		r.mtx.RUnlock() +		return false +	} +	r.mtx.RUnlock() + +	r.mtx.Lock() +	defer r.mtx.Unlock() + +	delete(r.collectorsByID, collectorID) +	for id := range descIDs { +		delete(r.descIDs, id) +	} +	// dimHashesByName is left untouched as those must be consistent +	// throughout the lifetime of a program. +	return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { +	for _, c := range cs { +		if err := r.Register(c); err != nil { +			panic(err) +		} +	} +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { +	var ( +		metricChan        = make(chan Metric, capMetricChan) +		metricHashes      = map[uint64]struct{}{} +		dimHashes         = map[string]uint64{} +		wg                sync.WaitGroup +		errs              MultiError          // The collected errors to return in the end. +		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks +	) + +	r.mtx.RLock() +	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + +	// Scatter. +	// (Collectors could be complex and slow, so we call them all at once.) +	wg.Add(len(r.collectorsByID)) +	go func() { +		wg.Wait() +		close(metricChan) +	}() +	for _, collector := range r.collectorsByID { +		go func(collector Collector) { +			defer wg.Done() +			collector.Collect(metricChan) +		}(collector) +	} + +	// In case pedantic checks are enabled, we have to copy the map before +	// giving up the RLock. 
+	if r.pedanticChecksEnabled { +		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) +		for id := range r.descIDs { +			registeredDescIDs[id] = struct{}{} +		} +	} + +	r.mtx.RUnlock() + +	// Drain metricChan in case of premature return. +	defer func() { +		for range metricChan { +		} +	}() + +	// Gather. +	for metric := range metricChan { +		// This could be done concurrently, too, but it required locking +		// of metricFamiliesByName (and of metricHashes if checks are +		// enabled). Most likely not worth it. +		desc := metric.Desc() +		dtoMetric := &dto.Metric{} +		if err := metric.Write(dtoMetric); err != nil { +			errs = append(errs, fmt.Errorf( +				"error collecting metric %v: %s", desc, err, +			)) +			continue +		} +		metricFamily, ok := metricFamiliesByName[desc.fqName] +		if ok { +			if metricFamily.GetHelp() != desc.help { +				errs = append(errs, fmt.Errorf( +					"collected metric %s %s has help %q but should have %q", +					desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), +				)) +				continue +			} +			// TODO(beorn7): Simplify switch once Desc has type. 
+			switch metricFamily.GetType() { +			case dto.MetricType_COUNTER: +				if dtoMetric.Counter == nil { +					errs = append(errs, fmt.Errorf( +						"collected metric %s %s should be a Counter", +						desc.fqName, dtoMetric, +					)) +					continue +				} +			case dto.MetricType_GAUGE: +				if dtoMetric.Gauge == nil { +					errs = append(errs, fmt.Errorf( +						"collected metric %s %s should be a Gauge", +						desc.fqName, dtoMetric, +					)) +					continue +				} +			case dto.MetricType_SUMMARY: +				if dtoMetric.Summary == nil { +					errs = append(errs, fmt.Errorf( +						"collected metric %s %s should be a Summary", +						desc.fqName, dtoMetric, +					)) +					continue +				} +			case dto.MetricType_UNTYPED: +				if dtoMetric.Untyped == nil { +					errs = append(errs, fmt.Errorf( +						"collected metric %s %s should be Untyped", +						desc.fqName, dtoMetric, +					)) +					continue +				} +			case dto.MetricType_HISTOGRAM: +				if dtoMetric.Histogram == nil { +					errs = append(errs, fmt.Errorf( +						"collected metric %s %s should be a Histogram", +						desc.fqName, dtoMetric, +					)) +					continue +				} +			default: +				panic("encountered MetricFamily with invalid type") +			} +		} else { +			metricFamily = &dto.MetricFamily{} +			metricFamily.Name = proto.String(desc.fqName) +			metricFamily.Help = proto.String(desc.help) +			// TODO(beorn7): Simplify switch once Desc has type. 
+			switch { +			case dtoMetric.Gauge != nil: +				metricFamily.Type = dto.MetricType_GAUGE.Enum() +			case dtoMetric.Counter != nil: +				metricFamily.Type = dto.MetricType_COUNTER.Enum() +			case dtoMetric.Summary != nil: +				metricFamily.Type = dto.MetricType_SUMMARY.Enum() +			case dtoMetric.Untyped != nil: +				metricFamily.Type = dto.MetricType_UNTYPED.Enum() +			case dtoMetric.Histogram != nil: +				metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() +			default: +				errs = append(errs, fmt.Errorf( +					"empty metric collected: %s", dtoMetric, +				)) +				continue +			} +			metricFamiliesByName[desc.fqName] = metricFamily +		} +		if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { +			errs = append(errs, err) +			continue +		} +		if r.pedanticChecksEnabled { +			// Is the desc registered at all? +			if _, exist := registeredDescIDs[desc.id]; !exist { +				errs = append(errs, fmt.Errorf( +					"collected metric %s %s with unregistered descriptor %s", +					metricFamily.GetName(), dtoMetric, desc, +				)) +				continue +			} +			if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { +				errs = append(errs, err) +				continue +			} +		} +		metricFamily.Metric = append(metricFamily.Metric, dtoMetric) +	} +	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calles are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. 
It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { +	var ( +		metricFamiliesByName = map[string]*dto.MetricFamily{} +		metricHashes         = map[uint64]struct{}{} +		dimHashes            = map[string]uint64{} +		errs                 MultiError // The collected errors to return in the end. +	) + +	for i, g := range gs { +		mfs, err := g.Gather() +		if err != nil { +			if multiErr, ok := err.(MultiError); ok { +				for _, err := range multiErr { +					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) +				} +			} else { +				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) +			} +		} +		for _, mf := range mfs { +			existingMF, exists := metricFamiliesByName[mf.GetName()] +			if exists { +				if existingMF.GetHelp() != mf.GetHelp() { +					errs = append(errs, fmt.Errorf( +						"gathered metric family %s has help %q but should have %q", +						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), +					)) +					continue +				} +				if existingMF.GetType() != mf.GetType() { +					errs = append(errs, fmt.Errorf( +						"gathered metric family %s has type %s but should have %s", +						mf.GetName(), mf.GetType(), existingMF.GetType(), +					)) +					continue +				} +			} else { +				existingMF = &dto.MetricFamily{} +				existingMF.Name = mf.Name +				existingMF.Help = 
mf.Help +				existingMF.Type = mf.Type +				metricFamiliesByName[mf.GetName()] = existingMF +			} +			for _, m := range mf.Metric { +				if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { +					errs = append(errs, err) +					continue +				} +				existingMF.Metric = append(existingMF.Metric, m) +			} +		} +	} +	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { +	return len(s) +} + +func (s metricSorter) Swap(i, j int) { +	s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { +	if len(s[i].Label) != len(s[j].Label) { +		// This should not happen. The metrics are +		// inconsistent. However, we have to deal with the fact, as +		// people might use custom collectors or metric family injection +		// to create inconsistent metrics. So let's simply compare the +		// number of labels in this case. That will still yield +		// reproducible sorting. +		return len(s[i].Label) < len(s[j].Label) +	} +	for n, lp := range s[i].Label { +		vi := lp.GetValue() +		vj := s[j].Label[n].GetValue() +		if vi != vj { +			return vi < vj +		} +	} + +	// We should never arrive here. Multiple metrics with the same +	// label set in the same scrape will lead to undefined ingestion +	// behavior. However, as above, we have to provide stable sorting +	// here, even for inconsistent metrics. So sort equal metrics +	// by their timestamp, with missing timestamps (implying "now") +	// coming last. +	if s[i].TimestampMs == nil { +		return false +	} +	if s[j].TimestampMs == nil { +		return true +	} +	return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// normalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. 
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+	dimHashes map[string]uint64,
+) error {
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %s %s is not a %s",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+	h := hashNew() +	h = hashAdd(h, metricFamily.GetName()) +	h = hashAddByte(h, separatorByte) +	dh := hashNew() +	// Make sure label pairs are sorted. We depend on it for the consistency +	// check. +	sort.Sort(LabelPairSorter(dtoMetric.Label)) +	for _, lp := range dtoMetric.Label { +		h = hashAdd(h, lp.GetValue()) +		h = hashAddByte(h, separatorByte) +		dh = hashAdd(dh, lp.GetName()) +		dh = hashAddByte(dh, separatorByte) +	} +	if _, exists := metricHashes[h]; exists { +		return fmt.Errorf( +			"collected metric %s %s was collected before with the same name and label values", +			metricFamily.GetName(), dtoMetric, +		) +	} +	if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { +		if dimHash != dh { +			return fmt.Errorf( +				"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", +				metricFamily.GetName(), dtoMetric, +			) +		} +	} else { +		dimHashes[metricFamily.GetName()] = dh +	} +	metricHashes[h] = struct{}{} +	return nil +} + +func checkDescConsistency( +	metricFamily *dto.MetricFamily, +	dtoMetric *dto.Metric, +	desc *Desc, +) error { +	// Desc help consistency with metric family help. +	if metricFamily.GetHelp() != desc.help { +		return fmt.Errorf( +			"collected metric %s %s has help %q but should have %q", +			metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, +		) +	} + +	// Is the desc consistent with the content of the metric? +	lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) +	lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) 
+	for _, l := range desc.variableLabels { +		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ +			Name: proto.String(l), +		}) +	} +	if len(lpsFromDesc) != len(dtoMetric.Label) { +		return fmt.Errorf( +			"labels in collected metric %s %s are inconsistent with descriptor %s", +			metricFamily.GetName(), dtoMetric, desc, +		) +	} +	sort.Sort(LabelPairSorter(lpsFromDesc)) +	for i, lpFromDesc := range lpsFromDesc { +		lpFromMetric := dtoMetric.Label[i] +		if lpFromDesc.GetName() != lpFromMetric.GetName() || +			lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { +			return fmt.Errorf( +				"labels in collected metric %s %s are inconsistent with descriptor %s", +				metricFamily.GetName(), dtoMetric, desc, +			) +		} +	} +	return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 0000000..82b8850 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,543 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( +	"fmt" +	"math" +	"sort" +	"sync" +	"time" + +	"github.com/beorn7/perks/quantile" +	"github.com/golang/protobuf/proto" + +	dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. 
+const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { +	Metric +	Collector + +	// Observe adds a single observation to the summary. +	Observe(float64) +} + +// DefObjectives are the default Summary quantile values. +// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. The default Summary will have no quantiles then. +var ( +	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + +	errQuantileLabelNotAllowed = fmt.Errorf( +		"%q is not allowed as label name in summaries", quantileLabel, +	) +) + +// Default values for SummaryOpts. +const ( +	// DefMaxAge is the default duration for which observations stay +	// relevant. +	DefMaxAge time.Duration = 10 * time.Minute +	// DefAgeBuckets is the default number of buckets used to calculate the +	// age of observations. +	DefAgeBuckets = 5 +	// DefBufCap is the standard buffer size for collecting Summary observations. +	DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. 
All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { +	// Namespace, Subsystem, and Name are components of the fully-qualified +	// name of the Summary (created by joining these components with +	// "_"). Only Name is mandatory, the others merely help structuring the +	// name. Note that the fully-qualified name of the Summary must be a +	// valid Prometheus metric name. +	Namespace string +	Subsystem string +	Name      string + +	// Help provides information about this Summary. Mandatory! +	// +	// Metrics with the same fully-qualified name must have the same Help +	// string. +	Help string + +	// ConstLabels are used to attach fixed labels to this +	// Summary. Summaries with the same fully-qualified name must have the +	// same label names in their ConstLabels. +	// +	// Note that in most cases, labels have a value that varies during the +	// lifetime of a process. Those labels are usually managed with a +	// SummaryVec. ConstLabels serve only special purposes. One is for the +	// special case where the value of a label does not change during the +	// lifetime of a process, e.g. if the revision of the running binary is +	// put into a label. Another, more advanced purpose is if more than one +	// Collector needs to collect Summaries with the same fully-qualified +	// name. In that case, those Summaries must differ in the values of +	// their ConstLabels. See the Collector examples. +	// +	// If the value of a label never changes (not even between binaries), +	// that label most likely should not be a label at all (but part of the +	// metric name). +	ConstLabels Labels + +	// Objectives defines the quantile rank estimates with their respective +	// absolute error. If Objectives[q] = e, then the value reported for q +	// will be the φ-quantile value for some φ between q-e and q+e.  The +	// default value is DefObjectives. It is used if Objectives is left at +	// its zero value (i.e. nil). 
To create a Summary without Objectives, +	// set it to an empty map (i.e. map[float64]float64{}). +	// +	// Deprecated: Note that the current value of DefObjectives is +	// deprecated. It will be replaced by an empty map in v0.10 of the +	// library. Please explicitly set Objectives to the desired value. +	Objectives map[float64]float64 + +	// MaxAge defines the duration for which an observation stays relevant +	// for the summary. Must be positive. The default value is DefMaxAge. +	MaxAge time.Duration + +	// AgeBuckets is the number of buckets used to exclude observations that +	// are older than MaxAge from the summary. A higher number has a +	// resource penalty, so only increase it if the higher resolution is +	// really required. For very high observation rates, you might want to +	// reduce the number of age buckets. With only one age bucket, you will +	// effectively see a complete reset of the summary each time MaxAge has +	// passed. The default value is DefAgeBuckets. +	AgeBuckets uint32 + +	// BufCap defines the default sample stream buffer size.  The default +	// value of DefBufCap should suffice for most uses. If there is a need +	// to increase the value, a multiple of 500 is recommended (because that +	// is the internal buffer size of the underlying package +	// "github.com/bmizerany/perks/quantile"). +	BufCap uint32 +} + +// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). 
Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { +	return newSummary( +		NewDesc( +			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +			opts.Help, +			nil, +			opts.ConstLabels, +		), +		opts, +	) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { +	if len(desc.variableLabels) != len(labelValues) { +		panic(errInconsistentCardinality) +	} + +	for _, n := range desc.variableLabels { +		if n == quantileLabel { +			panic(errQuantileLabelNotAllowed) +		} +	} +	for _, lp := range desc.constLabelPairs { +		if lp.GetName() == quantileLabel { +			panic(errQuantileLabelNotAllowed) +		} +	} + +	if opts.Objectives == nil { +		opts.Objectives = DefObjectives +	} + +	if opts.MaxAge < 0 { +		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) +	} +	if opts.MaxAge == 0 { +		opts.MaxAge = DefMaxAge +	} + +	if opts.AgeBuckets == 0 { +		opts.AgeBuckets = DefAgeBuckets +	} + +	if opts.BufCap == 0 { +		opts.BufCap = DefBufCap +	} + +	s := &summary{ +		desc: desc, + +		objectives:       opts.Objectives, +		sortedObjectives: make([]float64, 0, len(opts.Objectives)), + +		labelPairs: makeLabelPairs(desc, labelValues), + +		hotBuf:         make([]float64, 0, opts.BufCap), +		coldBuf:        make([]float64, 0, opts.BufCap), +		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), +	} +	s.headStreamExpTime = time.Now().Add(s.streamDuration) +	s.hotBufExpTime = s.headStreamExpTime + +	for i := uint32(0); i < opts.AgeBuckets; i++ { +		s.streams = append(s.streams, s.newStream()) +	} +	s.headStream = 
s.streams[0] + +	for qu := range s.objectives { +		s.sortedObjectives = append(s.sortedObjectives, qu) +	} +	sort.Float64s(s.sortedObjectives) + +	s.init(s) // Init self-collection. +	return s +} + +type summary struct { +	selfCollector + +	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. +	mtx    sync.Mutex // Protects every other moving part. +	// Lock bufMtx before mtx if both are needed. + +	desc *Desc + +	objectives       map[float64]float64 +	sortedObjectives []float64 + +	labelPairs []*dto.LabelPair + +	sum float64 +	cnt uint64 + +	hotBuf, coldBuf []float64 + +	streams                          []*quantile.Stream +	streamDuration                   time.Duration +	headStream                       *quantile.Stream +	headStreamIdx                    int +	headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { +	return s.desc +} + +func (s *summary) Observe(v float64) { +	s.bufMtx.Lock() +	defer s.bufMtx.Unlock() + +	now := time.Now() +	if now.After(s.hotBufExpTime) { +		s.asyncFlush(now) +	} +	s.hotBuf = append(s.hotBuf, v) +	if len(s.hotBuf) == cap(s.hotBuf) { +		s.asyncFlush(now) +	} +} + +func (s *summary) Write(out *dto.Metric) error { +	sum := &dto.Summary{} +	qs := make([]*dto.Quantile, 0, len(s.objectives)) + +	s.bufMtx.Lock() +	s.mtx.Lock() +	// Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
+	s.swapBufs(time.Now()) +	s.bufMtx.Unlock() + +	s.flushColdBuf() +	sum.SampleCount = proto.Uint64(s.cnt) +	sum.SampleSum = proto.Float64(s.sum) + +	for _, rank := range s.sortedObjectives { +		var q float64 +		if s.headStream.Count() == 0 { +			q = math.NaN() +		} else { +			q = s.headStream.Query(rank) +		} +		qs = append(qs, &dto.Quantile{ +			Quantile: proto.Float64(rank), +			Value:    proto.Float64(q), +		}) +	} + +	s.mtx.Unlock() + +	if len(qs) > 0 { +		sort.Sort(quantSort(qs)) +	} +	sum.Quantile = qs + +	out.Summary = sum +	out.Label = s.labelPairs +	return nil +} + +func (s *summary) newStream() *quantile.Stream { +	return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { +	s.mtx.Lock() +	s.swapBufs(now) + +	// Unblock the original goroutine that was responsible for the mutation +	// that triggered the compaction.  But hold onto the global non-buffer +	// state mutex until the operation finishes. +	go func() { +		s.flushColdBuf() +		s.mtx.Unlock() +	}() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { +	for !s.hotBufExpTime.Equal(s.headStreamExpTime) { +		s.headStream.Reset() +		s.headStreamIdx++ +		if s.headStreamIdx >= len(s.streams) { +			s.headStreamIdx = 0 +		} +		s.headStream = s.streams[s.headStreamIdx] +		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) +	} +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { +	for _, v := range s.coldBuf { +		for _, stream := range s.streams { +			stream.Insert(v) +		} +		s.cnt++ +		s.sum += v +	} +	s.coldBuf = s.coldBuf[0:0] +	s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { +	if len(s.coldBuf) != 0 { +		panic("coldBuf is not empty") +	} +	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf +	// hotBuf is now empty and gets new expiration set. 
+	for now.After(s.hotBufExpTime) { +		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) +	} +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { +	return len(s) +} + +func (s quantSort) Swap(i, j int) { +	s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { +	return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { +	*MetricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { +	desc := NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		labelNames, +		opts.ConstLabels, +	) +	return &SummaryVec{ +		MetricVec: newMetricVec(desc, func(lvs ...string) Metric { +			return newSummary(desc, opts, lvs...) +		}), +	} +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { +	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +	if metric != nil { +		return metric.(Summary), err +	} +	return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. 
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { +	metric, err := m.MetricVec.GetMetricWith(labels) +	if metric != nil { +		return metric.(Summary), err +	} +	return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +//     myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { +	return m.MetricVec.WithLabelValues(lvs...).(Summary) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Summary { +	return m.MetricVec.With(labels).(Summary) +} + +type constSummary struct { +	desc       *Desc +	count      uint64 +	sum        float64 +	quantiles  map[float64]float64 +	labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { +	return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { +	sum := &dto.Summary{} +	qs := make([]*dto.Quantile, 0, len(s.quantiles)) + +	sum.SampleCount = proto.Uint64(s.count) +	sum.SampleSum = proto.Float64(s.sum) + +	for rank, q := range s.quantiles { +		qs = append(qs, &dto.Quantile{ +			Quantile: proto.Float64(rank), +			Value:    proto.Float64(q), +		}) +	} + +	if len(qs) > 0 { +		sort.Sort(quantSort(qs)) +	} +	sum.Quantile = qs + +	out.Summary = sum +	out.Label = s.labelPairs + +	return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. 
However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +//     map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstSummary( +	desc *Desc, +	count uint64, +	sum float64, +	quantiles map[float64]float64, +	labelValues ...string, +) (Metric, error) { +	if len(desc.variableLabels) != len(labelValues) { +		return nil, errInconsistentCardinality +	} +	return &constSummary{ +		desc:       desc, +		count:      count, +		sum:        sum, +		quantiles:  quantiles, +		labelPairs: makeLabelPairs(desc, labelValues), +	}, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( +	desc *Desc, +	count uint64, +	sum float64, +	quantiles map[float64]float64, +	labelValues ...string, +) Metric { +	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) +	if err != nil { +		panic(err) +	} +	return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 0000000..f4cac5a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { +	Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { +	f(value) +} + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { +	begin    time.Time +	observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +//    func TimeMe() { +//        timer := NewTimer(myHistogram) +//        defer timer.ObserveDuration() +//        // Do actual work. 
+//    } +func NewTimer(o Observer) *Timer { +	return &Timer{ +		begin:    time.Now(), +		observer: o, +	} +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +func (t *Timer) ObserveDuration() { +	if t.observer != nil { +		t.observer.Observe(time.Since(t.begin).Seconds()) +	} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 0000000..065501d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,143 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Untyped instances, use NewUntyped. +// +// Deprecated: The Untyped type is deprecated because it doesn't make sense in +// direct instrumentation. If you need to mirror an external metric of unknown +// type (usually while writing exporters), Use MustNewConstMetric to create an +// untyped metric instance on the fly. 
+type Untyped interface { +	Metric +	Collector + +	// Set sets the Untyped metric to an arbitrary value. +	Set(float64) +	// Inc increments the Untyped metric by 1. +	Inc() +	// Dec decrements the Untyped metric by 1. +	Dec() +	// Add adds the given value to the Untyped metric. (The value can be +	// negative, resulting in a decrease.) +	Add(float64) +	// Sub subtracts the given value from the Untyped metric. (The value can +	// be negative, resulting in an increase.) +	Sub(float64) +} + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. +func NewUntyped(opts UntypedOpts) Untyped { +	return newValue(NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		nil, +		opts.ConstLabels, +	), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. +type UntypedVec struct { +	*MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { +	desc := NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		labelNames, +		opts.ConstLabels, +	) +	return &UntypedVec{ +		MetricVec: newMetricVec(desc, func(lvs ...string) Metric { +			return newValue(desc, UntypedValue, 0, lvs...) +		}), +	} +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. 
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { +	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +	if metric != nil { +		return metric.(Untyped), err +	} +	return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. +func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { +	metric, err := m.MetricVec.GetMetricWith(labels) +	if metric != nil { +		return metric.(Untyped), err +	} +	return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +//     myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { +	return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { +	return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { +	Metric +	Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { +	return newValueFunc(NewDesc( +		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), +		opts.Help, +		nil, +		opts.ConstLabels, +	), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 0000000..7d3e810 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,239 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( +	"errors" +	"fmt" +	"math" +	"sort" +	"sync/atomic" +	"time" + +	dto "github.com/prometheus/client_model/go" + +	"github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( +	_ ValueType = iota +	CounterValue +	GaugeValue +	UntypedValue +) + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { +	// valBits containst the bits of the represented float64 value. 
It has +	// to go first in the struct to guarantee alignment for atomic +	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG +	valBits uint64 + +	selfCollector + +	desc       *Desc +	valType    ValueType +	labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. +func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { +	if len(labelValues) != len(desc.variableLabels) { +		panic(errInconsistentCardinality) +	} +	result := &value{ +		desc:       desc, +		valType:    valueType, +		valBits:    math.Float64bits(val), +		labelPairs: makeLabelPairs(desc, labelValues), +	} +	result.init(result) +	return result +} + +func (v *value) Desc() *Desc { +	return v.desc +} + +func (v *value) Set(val float64) { +	atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) SetToCurrentTime() { +	v.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (v *value) Inc() { +	v.Add(1) +} + +func (v *value) Dec() { +	v.Add(-1) +} + +func (v *value) Add(val float64) { +	for { +		oldBits := atomic.LoadUint64(&v.valBits) +		newBits := math.Float64bits(math.Float64frombits(oldBits) + val) +		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { +			return +		} +	} +} + +func (v *value) Sub(val float64) { +	v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { +	val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) +	return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. 
+type valueFunc struct { +	selfCollector + +	desc       *Desc +	valType    ValueType +	function   func() float64 +	labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { +	result := &valueFunc{ +		desc:       desc, +		valType:    valueType, +		function:   function, +		labelPairs: makeLabelPairs(desc, nil), +	} +	result.init(result) +	return result +} + +func (v *valueFunc) Desc() *Desc { +	return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { +	return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { +	if len(desc.variableLabels) != len(labelValues) { +		return nil, errInconsistentCardinality +	} +	return &constMetric{ +		desc:       desc, +		valType:    valueType, +		val:        value, +		labelPairs: makeLabelPairs(desc, labelValues), +	}, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. 
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { +	m, err := NewConstMetric(desc, valueType, value, labelValues...) +	if err != nil { +		panic(err) +	} +	return m +} + +type constMetric struct { +	desc       *Desc +	valType    ValueType +	val        float64 +	labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { +	return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { +	return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( +	t ValueType, +	v float64, +	labelPairs []*dto.LabelPair, +	m *dto.Metric, +) error { +	m.Label = labelPairs +	switch t { +	case CounterValue: +		m.Counter = &dto.Counter{Value: proto.Float64(v)} +	case GaugeValue: +		m.Gauge = &dto.Gauge{Value: proto.Float64(v)} +	case UntypedValue: +		m.Untyped = &dto.Untyped{Value: proto.Float64(v)} +	default: +		return fmt.Errorf("encountered unknown type %v", t) +	} +	return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { +	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) +	if totalLen == 0 { +		// Super fast path. +		return nil +	} +	if len(desc.variableLabels) == 0 { +		// Moderately fast path. 
+		return desc.constLabelPairs +	} +	labelPairs := make([]*dto.LabelPair, 0, totalLen) +	for i, n := range desc.variableLabels { +		labelPairs = append(labelPairs, &dto.LabelPair{ +			Name:  proto.String(n), +			Value: proto.String(labelValues[i]), +		}) +	} +	for _, lp := range desc.constLabelPairs { +		labelPairs = append(labelPairs, lp) +	} +	sort.Sort(LabelPairSorter(labelPairs)) +	return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 0000000..7f3eef9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,404 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( +	"fmt" +	"sync" + +	"github.com/prometheus/common/model" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { +	mtx      sync.RWMutex // Protects the children. 
+	children map[uint64][]metricWithLabelValues +	desc     *Desc + +	newMetric   func(labelValues ...string) Metric +	hashAdd     func(h uint64, s string) uint64 // replace hash function for testing collision handling +	hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized MetricVec. The concrete value is +// returned for embedding into another struct. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { +	return &MetricVec{ +		children:    map[uint64][]metricWithLabelValues{}, +		desc:        desc, +		newMetric:   newMetric, +		hashAdd:     hashAdd, +		hashAddByte: hashAddByte, +	} +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { +	values []string +	metric Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { +	ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { +	m.mtx.RLock() +	defer m.mtx.RUnlock() + +	for _, metrics := range m.children { +		for _, metric := range metrics { +			ch <- metric.metric +		} +	} +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. 
In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { +	h, err := m.hashLabelValues(lvs) +	if err != nil { +		return nil, err +	} + +	return m.getOrCreateMetricWithLabelValues(h, lvs), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { +	h, err := m.hashLabels(labels) +	if err != nil { +		return nil, err +	} + +	return m.getOrCreateMetricWithLabels(h, labels), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. 
The method allows neat syntax like: +//     httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { +	metric, err := m.GetMetricWithLabelValues(lvs...) +	if err != nil { +		panic(err) +	} +	return metric +} + +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +//     httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { +	metric, err := m.GetMetricWith(labels) +	if err != nil { +		panic(err) +	} +	return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc.  However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { +	m.mtx.Lock() +	defer m.mtx.Unlock() + +	h, err := m.hashLabelValues(lvs) +	if err != nil { +		return false +	} +	return m.deleteByHashWithLabelValues(h, lvs) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. 
However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *MetricVec) Delete(labels Labels) bool { +	m.mtx.Lock() +	defer m.mtx.Unlock() + +	h, err := m.hashLabels(labels) +	if err != nil { +		return false +	} + +	return m.deleteByHashWithLabels(h, labels) +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { +	metrics, ok := m.children[h] +	if !ok { +		return false +	} + +	i := m.findMetricWithLabelValues(metrics, lvs) +	if i >= len(metrics) { +		return false +	} + +	if len(metrics) > 1 { +		m.children[h] = append(metrics[:i], metrics[i+1:]...) +	} else { +		delete(m.children, h) +	} +	return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { +	metrics, ok := m.children[h] +	if !ok { +		return false +	} +	i := m.findMetricWithLabels(metrics, labels) +	if i >= len(metrics) { +		return false +	} + +	if len(metrics) > 1 { +		m.children[h] = append(metrics[:i], metrics[i+1:]...) +	} else { +		delete(m.children, h) +	} +	return true +} + +// Reset deletes all metrics in this vector. 
+func (m *MetricVec) Reset() { +	m.mtx.Lock() +	defer m.mtx.Unlock() + +	for h := range m.children { +		delete(m.children, h) +	} +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { +	if len(vals) != len(m.desc.variableLabels) { +		return 0, errInconsistentCardinality +	} +	h := hashNew() +	for _, val := range vals { +		h = m.hashAdd(h, val) +		h = m.hashAddByte(h, model.SeparatorByte) +	} +	return h, nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { +	if len(labels) != len(m.desc.variableLabels) { +		return 0, errInconsistentCardinality +	} +	h := hashNew() +	for _, label := range m.desc.variableLabels { +		val, ok := labels[label] +		if !ok { +			return 0, fmt.Errorf("label name %q missing in label map", label) +		} +		h = m.hashAdd(h, val) +		h = m.hashAddByte(h, model.SeparatorByte) +	} +	return h, nil +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { +	m.mtx.RLock() +	metric, ok := m.getMetricWithLabelValues(hash, lvs) +	m.mtx.RUnlock() +	if ok { +		return metric +	} + +	m.mtx.Lock() +	defer m.mtx.Unlock() +	metric, ok = m.getMetricWithLabelValues(hash, lvs) +	if !ok { +		// Copy to avoid allocation in case we don't go down this code path. +		copiedLVs := make([]string, len(lvs)) +		copy(copiedLVs, lvs) +		metric = m.newMetric(copiedLVs...) +		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) +	} +	return metric +} + +// getOrCreateMetricWithLabels retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. 
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { +	m.mtx.RLock() +	metric, ok := m.getMetricWithLabels(hash, labels) +	m.mtx.RUnlock() +	if ok { +		return metric +	} + +	m.mtx.Lock() +	defer m.mtx.Unlock() +	metric, ok = m.getMetricWithLabels(hash, labels) +	if !ok { +		lvs := m.extractLabelValues(labels) +		metric = m.newMetric(lvs...) +		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) +	} +	return metric +} + +// getMetricWithLabelValues gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { +	metrics, ok := m.children[h] +	if ok { +		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { +			return metrics[i].metric, true +		} +	} +	return nil, false +} + +// getMetricWithLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { +	metrics, ok := m.children[h] +	if ok { +		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { +			return metrics[i].metric, true +		} +	} +	return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { +	for i, metric := range metrics { +		if m.matchLabelValues(metric.values, lvs) { +			return i +		} +	} +	return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. 
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { +	for i, metric := range metrics { +		if m.matchLabels(metric.values, labels) { +			return i +		} +	} +	return len(metrics) +} + +func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { +	if len(values) != len(lvs) { +		return false +	} +	for i, v := range values { +		if v != lvs[i] { +			return false +		} +	} +	return true +} + +func (m *MetricVec) matchLabels(values []string, labels Labels) bool { +	if len(labels) != len(values) { +		return false +	} +	for i, k := range m.desc.variableLabels { +		if values[i] != labels[k] { +			return false +		} +	} +	return true +} + +func (m *MetricVec) extractLabelValues(labels Labels) []string { +	labelValues := make([]string, len(labels)) +	for i, k := range m.desc.variableLabels { +		labelValues[i] = labels[k] +	} +	return labelValues +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ +                                 Apache License +                           Version 2.0, January 2004 +                        http://www.apache.org/licenses/ + +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +   1. Definitions. + +      "License" shall mean the terms and conditions for use, reproduction, +      and distribution as defined by Sections 1 through 9 of this document. + +      "Licensor" shall mean the copyright owner or entity authorized by +      the copyright owner that is granting the License. + +      "Legal Entity" shall mean the union of the acting entity and all +      other entities that control, are controlled by, or are under common +      control with that entity. 
For the purposes of this definition, +      "control" means (i) the power, direct or indirect, to cause the +      direction or management of such entity, whether by contract or +      otherwise, or (ii) ownership of fifty percent (50%) or more of the +      outstanding shares, or (iii) beneficial ownership of such entity. + +      "You" (or "Your") shall mean an individual or Legal Entity +      exercising permissions granted by this License. + +      "Source" form shall mean the preferred form for making modifications, +      including but not limited to software source code, documentation +      source, and configuration files. + +      "Object" form shall mean any form resulting from mechanical +      transformation or translation of a Source form, including but +      not limited to compiled object code, generated documentation, +      and conversions to other media types. + +      "Work" shall mean the work of authorship, whether in Source or +      Object form, made available under the License, as indicated by a +      copyright notice that is included in or attached to the work +      (an example is provided in the Appendix below). + +      "Derivative Works" shall mean any work, whether in Source or Object +      form, that is based on (or derived from) the Work and for which the +      editorial revisions, annotations, elaborations, or other modifications +      represent, as a whole, an original work of authorship. For the purposes +      of this License, Derivative Works shall not include works that remain +      separable from, or merely link (or bind by name) to the interfaces of, +      the Work and Derivative Works thereof. 
+ +      "Contribution" shall mean any work of authorship, including +      the original version of the Work and any modifications or additions +      to that Work or Derivative Works thereof, that is intentionally +      submitted to Licensor for inclusion in the Work by the copyright owner +      or by an individual or Legal Entity authorized to submit on behalf of +      the copyright owner. For the purposes of this definition, "submitted" +      means any form of electronic, verbal, or written communication sent +      to the Licensor or its representatives, including but not limited to +      communication on electronic mailing lists, source code control systems, +      and issue tracking systems that are managed by, or on behalf of, the +      Licensor for the purpose of discussing and improving the Work, but +      excluding communication that is conspicuously marked or otherwise +      designated in writing by the copyright owner as "Not a Contribution." + +      "Contributor" shall mean Licensor and any individual or Legal Entity +      on behalf of whom a Contribution has been received by Licensor and +      subsequently incorporated within the Work. + +   2. Grant of Copyright License. Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      copyright license to reproduce, prepare Derivative Works of, +      publicly display, publicly perform, sublicense, and distribute the +      Work and such Derivative Works in Source or Object form. + +   3. Grant of Patent License. 
Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      (except as stated in this section) patent license to make, have made, +      use, offer to sell, sell, import, and otherwise transfer the Work, +      where such license applies only to those patent claims licensable +      by such Contributor that are necessarily infringed by their +      Contribution(s) alone or by combination of their Contribution(s) +      with the Work to which such Contribution(s) was submitted. If You +      institute patent litigation against any entity (including a +      cross-claim or counterclaim in a lawsuit) alleging that the Work +      or a Contribution incorporated within the Work constitutes direct +      or contributory patent infringement, then any patent licenses +      granted to You under this License for that Work shall terminate +      as of the date such litigation is filed. + +   4. Redistribution. 
You may reproduce and distribute copies of the +      Work or Derivative Works thereof in any medium, with or without +      modifications, and in Source or Object form, provided that You +      meet the following conditions: + +      (a) You must give any other recipients of the Work or +          Derivative Works a copy of this License; and + +      (b) You must cause any modified files to carry prominent notices +          stating that You changed the files; and + +      (c) You must retain, in the Source form of any Derivative Works +          that You distribute, all copyright, patent, trademark, and +          attribution notices from the Source form of the Work, +          excluding those notices that do not pertain to any part of +          the Derivative Works; and + +      (d) If the Work includes a "NOTICE" text file as part of its +          distribution, then any Derivative Works that You distribute must +          include a readable copy of the attribution notices contained +          within such NOTICE file, excluding those notices that do not +          pertain to any part of the Derivative Works, in at least one +          of the following places: within a NOTICE text file distributed +          as part of the Derivative Works; within the Source form or +          documentation, if provided along with the Derivative Works; or, +          within a display generated by the Derivative Works, if and +          wherever such third-party notices normally appear. The contents +          of the NOTICE file are for informational purposes only and +          do not modify the License. You may add Your own attribution +          notices within Derivative Works that You distribute, alongside +          or as an addendum to the NOTICE text from the Work, provided +          that such additional attribution notices cannot be construed +          as modifying the License. 
+ +      You may add Your own copyright statement to Your modifications and +      may provide additional or different license terms and conditions +      for use, reproduction, or distribution of Your modifications, or +      for any such Derivative Works as a whole, provided Your use, +      reproduction, and distribution of the Work otherwise complies with +      the conditions stated in this License. + +   5. Submission of Contributions. Unless You explicitly state otherwise, +      any Contribution intentionally submitted for inclusion in the Work +      by You to the Licensor shall be under the terms and conditions of +      this License, without any additional terms or conditions. +      Notwithstanding the above, nothing herein shall supersede or modify +      the terms of any separate license agreement you may have executed +      with Licensor regarding such Contributions. + +   6. Trademarks. This License does not grant permission to use the trade +      names, trademarks, service marks, or product names of the Licensor, +      except as required for reasonable and customary use in describing the +      origin of the Work and reproducing the content of the NOTICE file. + +   7. Disclaimer of Warranty. Unless required by applicable law or +      agreed to in writing, Licensor provides the Work (and each +      Contributor provides its Contributions) on an "AS IS" BASIS, +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +      implied, including, without limitation, any warranties or conditions +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +      PARTICULAR PURPOSE. You are solely responsible for determining the +      appropriateness of using or redistributing the Work and assume any +      risks associated with Your exercise of permissions under this License. + +   8. Limitation of Liability. 
In no event and under no legal theory, +      whether in tort (including negligence), contract, or otherwise, +      unless required by applicable law (such as deliberate and grossly +      negligent acts) or agreed to in writing, shall any Contributor be +      liable to You for damages, including any direct, indirect, special, +      incidental, or consequential damages of any character arising as a +      result of this License or out of the use or inability to use the +      Work (including but not limited to damages for loss of goodwill, +      work stoppage, computer failure or malfunction, or any and all +      other commercial damages or losses), even if such Contributor +      has been advised of the possibility of such damages. + +   9. Accepting Warranty or Additional Liability. While redistributing +      the Work or Derivative Works thereof, You may choose to offer, +      and charge a fee for, acceptance of support, warranty, indemnity, +      or other liability obligations and/or rights consistent with this +      License. However, in accepting such obligations, You may act only +      on Your own behalf and on Your sole responsibility, not on behalf +      of any other Contributor, and only if You agree to indemnify, +      defend, and hold each Contributor harmless for any liability +      incurred by, or claims asserted against, such Contributor by reason +      of your accepting any such warranty or additional liability. + +   END OF TERMS AND CONDITIONS + +   APPENDIX: How to apply the Apache License to your work. + +      To apply the Apache License to your work, attach the following +      boilerplate notice, with the fields enclosed by brackets "[]" +      replaced with your own identifying information. (Don't include +      the brackets!)  The text should be enclosed in the appropriate +      comment syntax for the file format. 
We also recommend that a +      file or class name and description of purpose be included on the +      same "printed page" as the copyright notice for easier +      identification within third-party archives. + +   Copyright [yyyy] [name of copyright owner] + +   Licensed under the Apache License, Version 2.0 (the "License"); +   you may not use this file except in compliance with the License. +   You may obtain a copy of the License at + +       http://www.apache.org/licenses/LICENSE-2.0 + +   Unless required by applicable law or agreed to in writing, software +   distributed under the License is distributed on an "AS IS" BASIS, +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +   See the License for the specific language governing permissions and +   limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 0000000..20110e4 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 0000000..b065f86 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. 
+ +It is generated from these files: +	metrics.proto + +It has these top-level messages: +	LabelPair +	Gauge +	Counter +	Quantile +	Summary +	Untyped +	Histogram +	Bucket +	Metric +	MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( +	MetricType_COUNTER   MetricType = 0 +	MetricType_GAUGE     MetricType = 1 +	MetricType_SUMMARY   MetricType = 2 +	MetricType_UNTYPED   MetricType = 3 +	MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ +	0: "COUNTER", +	1: "GAUGE", +	2: "SUMMARY", +	3: "UNTYPED", +	4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ +	"COUNTER":   0, +	"GAUGE":     1, +	"SUMMARY":   2, +	"UNTYPED":   3, +	"HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { +	p := new(MetricType) +	*p = x +	return p +} +func (x MetricType) String() string { +	return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { +	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +	if err != nil { +		return err +	} +	*x = MetricType(value) +	return nil +} + +type LabelPair struct { +	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +	Value            *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +	XXX_unrecognized []byte  `json:"-"` +} + +func (m *LabelPair) Reset()         { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage()    {} + +func (m *LabelPair) GetName() string { +	if m != nil && m.Name != nil { +		return *m.Name +	} +	return "" +} + +func (m *LabelPair) GetValue() string { +	if m != nil && m.Value != nil { +		return *m.Value +	} +	return "" +} + +type Gauge struct { +	Value            *float64 
`protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +	XXX_unrecognized []byte   `json:"-"` +} + +func (m *Gauge) Reset()         { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage()    {} + +func (m *Gauge) GetValue() float64 { +	if m != nil && m.Value != nil { +		return *m.Value +	} +	return 0 +} + +type Counter struct { +	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +	XXX_unrecognized []byte   `json:"-"` +} + +func (m *Counter) Reset()         { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage()    {} + +func (m *Counter) GetValue() float64 { +	if m != nil && m.Value != nil { +		return *m.Value +	} +	return 0 +} + +type Quantile struct { +	Quantile         *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` +	Value            *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` +	XXX_unrecognized []byte   `json:"-"` +} + +func (m *Quantile) Reset()         { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage()    {} + +func (m *Quantile) GetQuantile() float64 { +	if m != nil && m.Quantile != nil { +		return *m.Quantile +	} +	return 0 +} + +func (m *Quantile) GetValue() float64 { +	if m != nil && m.Value != nil { +		return *m.Value +	} +	return 0 +} + +type Summary struct { +	SampleCount      *uint64     `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` +	SampleSum        *float64    `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` +	Quantile         []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` +	XXX_unrecognized []byte      `json:"-"` +} + +func (m *Summary) Reset()         { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) 
ProtoMessage()    {} + +func (m *Summary) GetSampleCount() uint64 { +	if m != nil && m.SampleCount != nil { +		return *m.SampleCount +	} +	return 0 +} + +func (m *Summary) GetSampleSum() float64 { +	if m != nil && m.SampleSum != nil { +		return *m.SampleSum +	} +	return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { +	if m != nil { +		return m.Quantile +	} +	return nil +} + +type Untyped struct { +	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +	XXX_unrecognized []byte   `json:"-"` +} + +func (m *Untyped) Reset()         { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage()    {} + +func (m *Untyped) GetValue() float64 { +	if m != nil && m.Value != nil { +		return *m.Value +	} +	return 0 +} + +type Histogram struct { +	SampleCount      *uint64   `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` +	SampleSum        *float64  `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` +	Bucket           []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` +	XXX_unrecognized []byte    `json:"-"` +} + +func (m *Histogram) Reset()         { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage()    {} + +func (m *Histogram) GetSampleCount() uint64 { +	if m != nil && m.SampleCount != nil { +		return *m.SampleCount +	} +	return 0 +} + +func (m *Histogram) GetSampleSum() float64 { +	if m != nil && m.SampleSum != nil { +		return *m.SampleSum +	} +	return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { +	if m != nil { +		return m.Bucket +	} +	return nil +} + +type Bucket struct { +	CumulativeCount  *uint64  `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` +	UpperBound       *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` +	XXX_unrecognized []byte   `json:"-"` +} + 
+func (m *Bucket) Reset()         { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage()    {} + +func (m *Bucket) GetCumulativeCount() uint64 { +	if m != nil && m.CumulativeCount != nil { +		return *m.CumulativeCount +	} +	return 0 +} + +func (m *Bucket) GetUpperBound() float64 { +	if m != nil && m.UpperBound != nil { +		return *m.UpperBound +	} +	return 0 +} + +type Metric struct { +	Label            []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` +	Gauge            *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` +	Counter          *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` +	Summary          *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` +	Untyped          *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` +	Histogram        *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` +	TimestampMs      *int64       `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` +	XXX_unrecognized []byte       `json:"-"` +} + +func (m *Metric) Reset()         { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage()    {} + +func (m *Metric) GetLabel() []*LabelPair { +	if m != nil { +		return m.Label +	} +	return nil +} + +func (m *Metric) GetGauge() *Gauge { +	if m != nil { +		return m.Gauge +	} +	return nil +} + +func (m *Metric) GetCounter() *Counter { +	if m != nil { +		return m.Counter +	} +	return nil +} + +func (m *Metric) GetSummary() *Summary { +	if m != nil { +		return m.Summary +	} +	return nil +} + +func (m *Metric) GetUntyped() *Untyped { +	if m != nil { +		return m.Untyped +	} +	return nil +} + +func (m *Metric) GetHistogram() *Histogram { +	if m != nil { +		return m.Histogram +	} +	return nil +} + +func (m *Metric) GetTimestampMs() int64 { +	if m != nil && 
m.TimestampMs != nil { +		return *m.TimestampMs +	} +	return 0 +} + +type MetricFamily struct { +	Name             *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +	Help             *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` +	Type             *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` +	Metric           []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` +	XXX_unrecognized []byte      `json:"-"` +} + +func (m *MetricFamily) Reset()         { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage()    {} + +func (m *MetricFamily) GetName() string { +	if m != nil && m.Name != nil { +		return *m.Name +	} +	return "" +} + +func (m *MetricFamily) GetHelp() string { +	if m != nil && m.Help != nil { +		return *m.Help +	} +	return "" +} + +func (m *MetricFamily) GetType() MetricType { +	if m != nil && m.Type != nil { +		return *m.Type +	} +	return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { +	if m != nil { +		return m.Metric +	} +	return nil +} + +func init() { +	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ +                                 Apache License +                           Version 2.0, January 2004 +                        http://www.apache.org/licenses/ + +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +   1. Definitions. + +      "License" shall mean the terms and conditions for use, reproduction, +      and distribution as defined by Sections 1 through 9 of this document. 
+ +      "Licensor" shall mean the copyright owner or entity authorized by +      the copyright owner that is granting the License. + +      "Legal Entity" shall mean the union of the acting entity and all +      other entities that control, are controlled by, or are under common +      control with that entity. For the purposes of this definition, +      "control" means (i) the power, direct or indirect, to cause the +      direction or management of such entity, whether by contract or +      otherwise, or (ii) ownership of fifty percent (50%) or more of the +      outstanding shares, or (iii) beneficial ownership of such entity. + +      "You" (or "Your") shall mean an individual or Legal Entity +      exercising permissions granted by this License. + +      "Source" form shall mean the preferred form for making modifications, +      including but not limited to software source code, documentation +      source, and configuration files. + +      "Object" form shall mean any form resulting from mechanical +      transformation or translation of a Source form, including but +      not limited to compiled object code, generated documentation, +      and conversions to other media types. + +      "Work" shall mean the work of authorship, whether in Source or +      Object form, made available under the License, as indicated by a +      copyright notice that is included in or attached to the work +      (an example is provided in the Appendix below). + +      "Derivative Works" shall mean any work, whether in Source or Object +      form, that is based on (or derived from) the Work and for which the +      editorial revisions, annotations, elaborations, or other modifications +      represent, as a whole, an original work of authorship. For the purposes +      of this License, Derivative Works shall not include works that remain +      separable from, or merely link (or bind by name) to the interfaces of, +      the Work and Derivative Works thereof. 
+ +      "Contribution" shall mean any work of authorship, including +      the original version of the Work and any modifications or additions +      to that Work or Derivative Works thereof, that is intentionally +      submitted to Licensor for inclusion in the Work by the copyright owner +      or by an individual or Legal Entity authorized to submit on behalf of +      the copyright owner. For the purposes of this definition, "submitted" +      means any form of electronic, verbal, or written communication sent +      to the Licensor or its representatives, including but not limited to +      communication on electronic mailing lists, source code control systems, +      and issue tracking systems that are managed by, or on behalf of, the +      Licensor for the purpose of discussing and improving the Work, but +      excluding communication that is conspicuously marked or otherwise +      designated in writing by the copyright owner as "Not a Contribution." + +      "Contributor" shall mean Licensor and any individual or Legal Entity +      on behalf of whom a Contribution has been received by Licensor and +      subsequently incorporated within the Work. + +   2. Grant of Copyright License. Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      copyright license to reproduce, prepare Derivative Works of, +      publicly display, publicly perform, sublicense, and distribute the +      Work and such Derivative Works in Source or Object form. + +   3. Grant of Patent License. 
Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      (except as stated in this section) patent license to make, have made, +      use, offer to sell, sell, import, and otherwise transfer the Work, +      where such license applies only to those patent claims licensable +      by such Contributor that are necessarily infringed by their +      Contribution(s) alone or by combination of their Contribution(s) +      with the Work to which such Contribution(s) was submitted. If You +      institute patent litigation against any entity (including a +      cross-claim or counterclaim in a lawsuit) alleging that the Work +      or a Contribution incorporated within the Work constitutes direct +      or contributory patent infringement, then any patent licenses +      granted to You under this License for that Work shall terminate +      as of the date such litigation is filed. + +   4. Redistribution. 
You may reproduce and distribute copies of the +      Work or Derivative Works thereof in any medium, with or without +      modifications, and in Source or Object form, provided that You +      meet the following conditions: + +      (a) You must give any other recipients of the Work or +          Derivative Works a copy of this License; and + +      (b) You must cause any modified files to carry prominent notices +          stating that You changed the files; and + +      (c) You must retain, in the Source form of any Derivative Works +          that You distribute, all copyright, patent, trademark, and +          attribution notices from the Source form of the Work, +          excluding those notices that do not pertain to any part of +          the Derivative Works; and + +      (d) If the Work includes a "NOTICE" text file as part of its +          distribution, then any Derivative Works that You distribute must +          include a readable copy of the attribution notices contained +          within such NOTICE file, excluding those notices that do not +          pertain to any part of the Derivative Works, in at least one +          of the following places: within a NOTICE text file distributed +          as part of the Derivative Works; within the Source form or +          documentation, if provided along with the Derivative Works; or, +          within a display generated by the Derivative Works, if and +          wherever such third-party notices normally appear. The contents +          of the NOTICE file are for informational purposes only and +          do not modify the License. You may add Your own attribution +          notices within Derivative Works that You distribute, alongside +          or as an addendum to the NOTICE text from the Work, provided +          that such additional attribution notices cannot be construed +          as modifying the License. 
+ +      You may add Your own copyright statement to Your modifications and +      may provide additional or different license terms and conditions +      for use, reproduction, or distribution of Your modifications, or +      for any such Derivative Works as a whole, provided Your use, +      reproduction, and distribution of the Work otherwise complies with +      the conditions stated in this License. + +   5. Submission of Contributions. Unless You explicitly state otherwise, +      any Contribution intentionally submitted for inclusion in the Work +      by You to the Licensor shall be under the terms and conditions of +      this License, without any additional terms or conditions. +      Notwithstanding the above, nothing herein shall supersede or modify +      the terms of any separate license agreement you may have executed +      with Licensor regarding such Contributions. + +   6. Trademarks. This License does not grant permission to use the trade +      names, trademarks, service marks, or product names of the Licensor, +      except as required for reasonable and customary use in describing the +      origin of the Work and reproducing the content of the NOTICE file. + +   7. Disclaimer of Warranty. Unless required by applicable law or +      agreed to in writing, Licensor provides the Work (and each +      Contributor provides its Contributions) on an "AS IS" BASIS, +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +      implied, including, without limitation, any warranties or conditions +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +      PARTICULAR PURPOSE. You are solely responsible for determining the +      appropriateness of using or redistributing the Work and assume any +      risks associated with Your exercise of permissions under this License. + +   8. Limitation of Liability. 
In no event and under no legal theory, +      whether in tort (including negligence), contract, or otherwise, +      unless required by applicable law (such as deliberate and grossly +      negligent acts) or agreed to in writing, shall any Contributor be +      liable to You for damages, including any direct, indirect, special, +      incidental, or consequential damages of any character arising as a +      result of this License or out of the use or inability to use the +      Work (including but not limited to damages for loss of goodwill, +      work stoppage, computer failure or malfunction, or any and all +      other commercial damages or losses), even if such Contributor +      has been advised of the possibility of such damages. + +   9. Accepting Warranty or Additional Liability. While redistributing +      the Work or Derivative Works thereof, You may choose to offer, +      and charge a fee for, acceptance of support, warranty, indemnity, +      or other liability obligations and/or rights consistent with this +      License. However, in accepting such obligations, You may act only +      on Your own behalf and on Your sole responsibility, not on behalf +      of any other Contributor, and only if You agree to indemnify, +      defend, and hold each Contributor harmless for any liability +      incurred by, or claims asserted against, such Contributor by reason +      of your accepting any such warranty or additional liability. + +   END OF TERMS AND CONDITIONS + +   APPENDIX: How to apply the Apache License to your work. + +      To apply the Apache License to your work, attach the following +      boilerplate notice, with the fields enclosed by brackets "[]" +      replaced with your own identifying information. (Don't include +      the brackets!)  The text should be enclosed in the appropriate +      comment syntax for the file format. 
We also recommend that a +      file or class name and description of purpose be included on the +      same "printed page" as the copyright notice for easier +      identification within third-party archives. + +   Copyright [yyyy] [name of copyright owner] + +   Licensed under the Apache License, Version 2.0 (the "License"); +   you may not use this file except in compliance with the License. +   You may obtain a copy of the License at + +       http://www.apache.org/licenses/LICENSE-2.0 + +   Unless required by applicable law or agreed to in writing, software +   distributed under the License is distributed on an "AS IS" BASIS, +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +   See the License for the specific language governing permissions and +   limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 0000000..636a2c1 --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 0000000..a7a42d5 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( +	"fmt" +	"io" +	"math" +	"mime" +	"net/http" + +	dto "github.com/prometheus/client_model/go" + +	"github.com/matttproud/golang_protobuf_extensions/pbutil" +	"github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { +	Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { +	// Timestamp is added to each value from the stream that has no explicit timestamp set. +	Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { +	ct := h.Get(hdrContentType) + +	mediatype, params, err := mime.ParseMediaType(ct) +	if err != nil { +		return FmtUnknown +	} + +	const textType = "text/plain" + +	switch mediatype { +	case ProtoType: +		if p, ok := params["proto"]; ok && p != ProtoProtocol { +			return FmtUnknown +		} +		if e, ok := params["encoding"]; ok && e != "delimited" { +			return FmtUnknown +		} +		return FmtProtoDelim + +	case textType: +		if v, ok := params["version"]; ok && v != TextVersion { +			return FmtUnknown +		} +		return FmtText +	} + +	return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { +	switch format { +	case FmtProtoDelim: +		return &protoDecoder{r: r} +	} +	return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { +	r io.Reader +} + +// Decode implements the Decoder interface. 
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error { +	_, err := pbutil.ReadDelimited(d.r, v) +	if err != nil { +		return err +	} +	if !model.IsValidMetricName(model.LabelValue(v.GetName())) { +		return fmt.Errorf("invalid metric name %q", v.GetName()) +	} +	for _, m := range v.GetMetric() { +		if m == nil { +			continue +		} +		for _, l := range m.GetLabel() { +			if l == nil { +				continue +			} +			if !model.LabelValue(l.GetValue()).IsValid() { +				return fmt.Errorf("invalid label value %q", l.GetValue()) +			} +			if !model.LabelName(l.GetName()).IsValid() { +				return fmt.Errorf("invalid label name %q", l.GetName()) +			} +		} +	} +	return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { +	r    io.Reader +	p    TextParser +	fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { +	// TODO(fabxc): Wrap this as a line reader to make streaming safer. +	if len(d.fams) == 0 { +		// No cached metric families, read everything and parse metrics. +		fams, err := d.p.TextToMetricFamilies(d.r) +		if err != nil { +			return err +		} +		if len(fams) == 0 { +			return io.EOF +		} +		d.fams = make([]*dto.MetricFamily, 0, len(fams)) +		for _, f := range fams { +			d.fams = append(d.fams, f) +		} +	} + +	*v = *d.fams[0] +	d.fams = d.fams[1:] + +	return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { +	Dec  Decoder +	Opts *DecodeOptions + +	f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. 
+func (sd *SampleDecoder) Decode(s *model.Vector) error { +	err := sd.Dec.Decode(&sd.f) +	if err != nil { +		return err +	} +	*s, err = extractSamples(&sd.f, sd.Opts) +	return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occured. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { +	var ( +		all     model.Vector +		lastErr error +	) +	for _, f := range fams { +		some, err := extractSamples(f, o) +		if err != nil { +			lastErr = err +			continue +		} +		all = append(all, some...) +	} +	return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { +	switch f.GetType() { +	case dto.MetricType_COUNTER: +		return extractCounter(o, f), nil +	case dto.MetricType_GAUGE: +		return extractGauge(o, f), nil +	case dto.MetricType_SUMMARY: +		return extractSummary(o, f), nil +	case dto.MetricType_UNTYPED: +		return extractUntyped(o, f), nil +	case dto.MetricType_HISTOGRAM: +		return extractHistogram(o, f), nil +	} +	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { +	samples := make(model.Vector, 0, len(f.Metric)) + +	for _, m := range f.Metric { +		if m.Counter == nil { +			continue +		} + +		lset := make(model.LabelSet, len(m.Label)+1) +		for _, p := range m.Label { +			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +		} +		lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + +		smpl := &model.Sample{ +			Metric: model.Metric(lset), +			Value:  model.SampleValue(m.Counter.GetValue()), +		} + +		if m.TimestampMs != nil { +			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) +		} else { +			smpl.Timestamp = o.Timestamp +		} + +	
	samples = append(samples, smpl) +	} + +	return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { +	samples := make(model.Vector, 0, len(f.Metric)) + +	for _, m := range f.Metric { +		if m.Gauge == nil { +			continue +		} + +		lset := make(model.LabelSet, len(m.Label)+1) +		for _, p := range m.Label { +			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +		} +		lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + +		smpl := &model.Sample{ +			Metric: model.Metric(lset), +			Value:  model.SampleValue(m.Gauge.GetValue()), +		} + +		if m.TimestampMs != nil { +			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) +		} else { +			smpl.Timestamp = o.Timestamp +		} + +		samples = append(samples, smpl) +	} + +	return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { +	samples := make(model.Vector, 0, len(f.Metric)) + +	for _, m := range f.Metric { +		if m.Untyped == nil { +			continue +		} + +		lset := make(model.LabelSet, len(m.Label)+1) +		for _, p := range m.Label { +			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +		} +		lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + +		smpl := &model.Sample{ +			Metric: model.Metric(lset), +			Value:  model.SampleValue(m.Untyped.GetValue()), +		} + +		if m.TimestampMs != nil { +			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) +		} else { +			smpl.Timestamp = o.Timestamp +		} + +		samples = append(samples, smpl) +	} + +	return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { +	samples := make(model.Vector, 0, len(f.Metric)) + +	for _, m := range f.Metric { +		if m.Summary == nil { +			continue +		} + +		timestamp := o.Timestamp +		if m.TimestampMs != nil { +			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) +		} + +		for _, q := range m.Summary.Quantile { +			lset := make(model.LabelSet, len(m.Label)+2) +			for 
_, p := range m.Label { +				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +			} +			// BUG(matt): Update other names to "quantile". +			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) +			lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + +			samples = append(samples, &model.Sample{ +				Metric:    model.Metric(lset), +				Value:     model.SampleValue(q.GetValue()), +				Timestamp: timestamp, +			}) +		} + +		lset := make(model.LabelSet, len(m.Label)+1) +		for _, p := range m.Label { +			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +		} +		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + +		samples = append(samples, &model.Sample{ +			Metric:    model.Metric(lset), +			Value:     model.SampleValue(m.Summary.GetSampleSum()), +			Timestamp: timestamp, +		}) + +		lset = make(model.LabelSet, len(m.Label)+1) +		for _, p := range m.Label { +			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +		} +		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + +		samples = append(samples, &model.Sample{ +			Metric:    model.Metric(lset), +			Value:     model.SampleValue(m.Summary.GetSampleCount()), +			Timestamp: timestamp, +		}) +	} + +	return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { +	samples := make(model.Vector, 0, len(f.Metric)) + +	for _, m := range f.Metric { +		if m.Histogram == nil { +			continue +		} + +		timestamp := o.Timestamp +		if m.TimestampMs != nil { +			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) +		} + +		infSeen := false + +		for _, q := range m.Histogram.Bucket { +			lset := make(model.LabelSet, len(m.Label)+2) +			for _, p := range m.Label { +				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +			} +			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) +			
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + +			if math.IsInf(q.GetUpperBound(), +1) { +				infSeen = true +			} + +			samples = append(samples, &model.Sample{ +				Metric:    model.Metric(lset), +				Value:     model.SampleValue(q.GetCumulativeCount()), +				Timestamp: timestamp, +			}) +		} + +		lset := make(model.LabelSet, len(m.Label)+1) +		for _, p := range m.Label { +			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +		} +		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + +		samples = append(samples, &model.Sample{ +			Metric:    model.Metric(lset), +			Value:     model.SampleValue(m.Histogram.GetSampleSum()), +			Timestamp: timestamp, +		}) + +		lset = make(model.LabelSet, len(m.Label)+1) +		for _, p := range m.Label { +			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +		} +		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + +		count := &model.Sample{ +			Metric:    model.Metric(lset), +			Value:     model.SampleValue(m.Histogram.GetSampleCount()), +			Timestamp: timestamp, +		} +		samples = append(samples, count) + +		if !infSeen { +			// Append an infinity bucket sample. 
+			lset := make(model.LabelSet, len(m.Label)+2) +			for _, p := range m.Label { +				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) +			} +			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") +			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + +			samples = append(samples, &model.Sample{ +				Metric:    model.Metric(lset), +				Value:     count.Value, +				Timestamp: timestamp, +			}) +		} +	} + +	return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 0000000..11839ed --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( +	"fmt" +	"io" +	"net/http" + +	"github.com/golang/protobuf/proto" +	"github.com/matttproud/golang_protobuf_extensions/pbutil" +	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + +	dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { +	Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { +	return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. 
+// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { +	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { +		// Check for protocol buffer +		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { +			switch ac.Params["encoding"] { +			case "delimited": +				return FmtProtoDelim +			case "text": +				return FmtProtoText +			case "compact-text": +				return FmtProtoCompact +			} +		} +		// Check for text format. +		ver := ac.Params["version"] +		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { +			return FmtText +		} +	} +	return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. +func NewEncoder(w io.Writer, format Format) Encoder { +	switch format { +	case FmtProtoDelim: +		return encoder(func(v *dto.MetricFamily) error { +			_, err := pbutil.WriteDelimited(w, v) +			return err +		}) +	case FmtProtoCompact: +		return encoder(func(v *dto.MetricFamily) error { +			_, err := fmt.Fprintln(w, v.String()) +			return err +		}) +	case FmtProtoText: +		return encoder(func(v *dto.MetricFamily) error { +			_, err := fmt.Fprintln(w, proto.MarshalTextString(v)) +			return err +		}) +	case FmtText: +		return encoder(func(v *dto.MetricFamily) error { +			_, err := MetricFamilyToText(w, v) +			return err +		}) +	} +	panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 0000000..371ac75 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( +	TextVersion   = "0.0.4" +	ProtoType     = `application/vnd.google.protobuf` +	ProtoProtocol = `io.prometheus.client.MetricFamily` +	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";" + +	// The Content-Type values for the different wire protocols. +	FmtUnknown      Format = `<unknown>` +	FmtText         Format = `text/plain; version=` + TextVersion +	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited` +	FmtProtoText    Format = ProtoFmt + ` encoding=text` +	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( +	hdrContentType = "Content-Type" +	hdrAccept      = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 0000000..dc2eede --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//     go-fuzz-build github.com/prometheus/common/expfmt
+//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := TextParser{}
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..f11321c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package expfmt + +import ( +	"fmt" +	"io" +	"math" +	"strings" + +	dto "github.com/prometheus/client_model/go" +	"github.com/prometheus/common/model" +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { +	var written int + +	// Fail-fast checks. +	if len(in.Metric) == 0 { +		return written, fmt.Errorf("MetricFamily has no metrics: %s", in) +	} +	name := in.GetName() +	if name == "" { +		return written, fmt.Errorf("MetricFamily has no name: %s", in) +	} + +	// Comments, first HELP, then TYPE. +	if in.Help != nil { +		n, err := fmt.Fprintf( +			out, "# HELP %s %s\n", +			name, escapeString(*in.Help, false), +		) +		written += n +		if err != nil { +			return written, err +		} +	} +	metricType := in.GetType() +	n, err := fmt.Fprintf( +		out, "# TYPE %s %s\n", +		name, strings.ToLower(metricType.String()), +	) +	written += n +	if err != nil { +		return written, err +	} + +	// Finally the samples, one line for each. 
+	for _, metric := range in.Metric { +		switch metricType { +		case dto.MetricType_COUNTER: +			if metric.Counter == nil { +				return written, fmt.Errorf( +					"expected counter in metric %s %s", name, metric, +				) +			} +			n, err = writeSample( +				name, metric, "", "", +				metric.Counter.GetValue(), +				out, +			) +		case dto.MetricType_GAUGE: +			if metric.Gauge == nil { +				return written, fmt.Errorf( +					"expected gauge in metric %s %s", name, metric, +				) +			} +			n, err = writeSample( +				name, metric, "", "", +				metric.Gauge.GetValue(), +				out, +			) +		case dto.MetricType_UNTYPED: +			if metric.Untyped == nil { +				return written, fmt.Errorf( +					"expected untyped in metric %s %s", name, metric, +				) +			} +			n, err = writeSample( +				name, metric, "", "", +				metric.Untyped.GetValue(), +				out, +			) +		case dto.MetricType_SUMMARY: +			if metric.Summary == nil { +				return written, fmt.Errorf( +					"expected summary in metric %s %s", name, metric, +				) +			} +			for _, q := range metric.Summary.Quantile { +				n, err = writeSample( +					name, metric, +					model.QuantileLabel, fmt.Sprint(q.GetQuantile()), +					q.GetValue(), +					out, +				) +				written += n +				if err != nil { +					return written, err +				} +			} +			n, err = writeSample( +				name+"_sum", metric, "", "", +				metric.Summary.GetSampleSum(), +				out, +			) +			if err != nil { +				return written, err +			} +			written += n +			n, err = writeSample( +				name+"_count", metric, "", "", +				float64(metric.Summary.GetSampleCount()), +				out, +			) +		case dto.MetricType_HISTOGRAM: +			if metric.Histogram == nil { +				return written, fmt.Errorf( +					"expected histogram in metric %s %s", name, metric, +				) +			} +			infSeen := false +			for _, q := range metric.Histogram.Bucket { +				n, err = writeSample( +					name+"_bucket", metric, +					model.BucketLabel, fmt.Sprint(q.GetUpperBound()), +					float64(q.GetCumulativeCount()), +					out, +				) +		
		written += n +				if err != nil { +					return written, err +				} +				if math.IsInf(q.GetUpperBound(), +1) { +					infSeen = true +				} +			} +			if !infSeen { +				n, err = writeSample( +					name+"_bucket", metric, +					model.BucketLabel, "+Inf", +					float64(metric.Histogram.GetSampleCount()), +					out, +				) +				if err != nil { +					return written, err +				} +				written += n +			} +			n, err = writeSample( +				name+"_sum", metric, "", "", +				metric.Histogram.GetSampleSum(), +				out, +			) +			if err != nil { +				return written, err +			} +			written += n +			n, err = writeSample( +				name+"_count", metric, "", "", +				float64(metric.Histogram.GetSampleCount()), +				out, +			) +		default: +			return written, fmt.Errorf( +				"unexpected type in metric %s %s", name, metric, +			) +		} +		written += n +		if err != nil { +			return written, err +		} +	} +	return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
+func writeSample( +	name string, +	metric *dto.Metric, +	additionalLabelName, additionalLabelValue string, +	value float64, +	out io.Writer, +) (int, error) { +	var written int +	n, err := fmt.Fprint(out, name) +	written += n +	if err != nil { +		return written, err +	} +	n, err = labelPairsToText( +		metric.Label, +		additionalLabelName, additionalLabelValue, +		out, +	) +	written += n +	if err != nil { +		return written, err +	} +	n, err = fmt.Fprintf(out, " %v", value) +	written += n +	if err != nil { +		return written, err +	} +	if metric.TimestampMs != nil { +		n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) +		written += n +		if err != nil { +			return written, err +		} +	} +	n, err = out.Write([]byte{'\n'}) +	written += n +	if err != nil { +		return written, err +	} +	return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. 
+func labelPairsToText( +	in []*dto.LabelPair, +	additionalLabelName, additionalLabelValue string, +	out io.Writer, +) (int, error) { +	if len(in) == 0 && additionalLabelName == "" { +		return 0, nil +	} +	var written int +	separator := '{' +	for _, lp := range in { +		n, err := fmt.Fprintf( +			out, `%c%s="%s"`, +			separator, lp.GetName(), escapeString(lp.GetValue(), true), +		) +		written += n +		if err != nil { +			return written, err +		} +		separator = ',' +	} +	if additionalLabelName != "" { +		n, err := fmt.Fprintf( +			out, `%c%s="%s"`, +			separator, additionalLabelName, +			escapeString(additionalLabelValue, true), +		) +		written += n +		if err != nil { +			return written, err +		} +	} +	n, err := out.Write([]byte{'}'}) +	written += n +	if err != nil { +		return written, err +	} +	return written, nil +} + +var ( +	escape                = strings.NewReplacer("\\", `\\`, "\n", `\n`) +	escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { +	if includeDoubleQuote { +		return escapeWithDoubleQuote.Replace(v) +	} + +	return escape.Replace(v) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 0000000..ef9a150 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,753 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( +	"bufio" +	"bytes" +	"fmt" +	"io" +	"math" +	"strconv" +	"strings" + +	dto "github.com/prometheus/client_model/go" + +	"github.com/golang/protobuf/proto" +	"github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { +	Line int +	Msg  string +} + +// Error implements the error interface. +func (e ParseError) Error() string { +	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { +	metricFamiliesByName map[string]*dto.MetricFamily +	buf                  *bufio.Reader // Where the parsed input is read through. +	err                  error         // Most recent error. +	lineCount            int           // Tracks the line count for error messages. +	currentByte          byte          // The most recent byte read. +	currentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes. 
+	currentMF            *dto.MetricFamily +	currentMetric        *dto.Metric +	currentLabelPair     *dto.LabelPair + +	// The remaining member variables are only used for summaries/histograms. +	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' +	// Summary specific. +	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature. +	currentQuantile float64 +	// Histogram specific. +	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature. +	currentBucket float64 +	// These tell us if the currently processed line ends on '_count' or +	// '_sum' respectively and belong to a summary/histogram, representing the sample +	// count and sum of that summary/histogram. +	currentIsSummaryCount, currentIsSummarySum     bool +	currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. 
You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { +	p.reset(in) +	for nextState := p.startOfLine; nextState != nil; nextState = nextState() { +		// Magic happens here... +	} +	// Get rid of empty metric families. +	for k, mf := range p.metricFamiliesByName { +		if len(mf.GetMetric()) == 0 { +			delete(p.metricFamiliesByName, k) +		} +	} +	// If p.err is io.EOF now, we have run into a premature end of the input +	// stream. Turn this error into something nicer and more +	// meaningful. (io.EOF is often used as a signal for the legitimate end +	// of an input stream.) +	if p.err == io.EOF { +		p.parseError("unexpected end of input stream") +	} +	return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { +	p.metricFamiliesByName = map[string]*dto.MetricFamily{} +	if p.buf == nil { +		p.buf = bufio.NewReader(in) +	} else { +		p.buf.Reset(in) +	} +	p.err = nil +	p.lineCount = 0 +	if p.summaries == nil || len(p.summaries) > 0 { +		p.summaries = map[uint64]*dto.Metric{} +	} +	if p.histograms == nil || len(p.histograms) > 0 { +		p.histograms = map[uint64]*dto.Metric{} +	} +	p.currentQuantile = math.NaN() +	p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { +	p.lineCount++ +	if p.skipBlankTab(); p.err != nil { +		// End of input reached. This is the only case where +		// that is not an error but a signal that we are done. 
+		p.err = nil +		return nil +	} +	switch p.currentByte { +	case '#': +		return p.startComment +	case '\n': +		return p.startOfLine // Empty line, start the next one. +	} +	return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { +	if p.skipBlankTab(); p.err != nil { +		return nil // Unexpected end of input. +	} +	if p.currentByte == '\n' { +		return p.startOfLine +	} +	if p.readTokenUntilWhitespace(); p.err != nil { +		return nil // Unexpected end of input. +	} +	// If we have hit the end of line already, there is nothing left +	// to do. This is not considered a syntax error. +	if p.currentByte == '\n' { +		return p.startOfLine +	} +	keyword := p.currentToken.String() +	if keyword != "HELP" && keyword != "TYPE" { +		// Generic comment, ignore by fast forwarding to end of line. +		for p.currentByte != '\n' { +			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { +				return nil // Unexpected end of input. +			} +		} +		return p.startOfLine +	} +	// There is something. Next has to be a metric name. +	if p.skipBlankTab(); p.err != nil { +		return nil // Unexpected end of input. +	} +	if p.readTokenAsMetricName(); p.err != nil { +		return nil // Unexpected end of input. +	} +	if p.currentByte == '\n' { +		// At the end of the line already. +		// Again, this is not considered a syntax error. +		return p.startOfLine +	} +	if !isBlankOrTab(p.currentByte) { +		p.parseError("invalid metric name in comment") +		return nil +	} +	p.setOrCreateCurrentMF() +	if p.skipBlankTab(); p.err != nil { +		return nil // Unexpected end of input. +	} +	if p.currentByte == '\n' { +		// At the end of the line already. +		// Again, this is not considered a syntax error. 
+		return p.startOfLine +	} +	switch keyword { +	case "HELP": +		return p.readingHelp +	case "TYPE": +		return p.readingType +	} +	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { +	if p.readTokenAsMetricName(); p.err != nil { +		return nil +	} +	if p.currentToken.Len() == 0 { +		p.parseError("invalid metric name") +		return nil +	} +	p.setOrCreateCurrentMF() +	// Now is the time to fix the type if it hasn't happened yet. +	if p.currentMF.Type == nil { +		p.currentMF.Type = dto.MetricType_UNTYPED.Enum() +	} +	p.currentMetric = &dto.Metric{} +	// Do not append the newly created currentMetric to +	// currentMF.Metric right now. First wait if this is a summary, +	// and the metric exists already, which we can only know after +	// having read all the labels. +	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { +		return nil // Unexpected end of input. +	} +	return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { +	// Summaries/histograms are special. We have to reset the +	// currentLabels map, currentQuantile and currentBucket before starting to +	// read labels. 
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { +		p.currentLabels = map[string]string{} +		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() +		p.currentQuantile = math.NaN() +		p.currentBucket = math.NaN() +	} +	if p.currentByte != '{' { +		return p.readingValue +	} +	return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { +	if p.skipBlankTab(); p.err != nil { +		return nil // Unexpected end of input. +	} +	if p.currentByte == '}' { +		if p.skipBlankTab(); p.err != nil { +			return nil // Unexpected end of input. +		} +		return p.readingValue +	} +	if p.readTokenAsLabelName(); p.err != nil { +		return nil // Unexpected end of input. +	} +	if p.currentToken.Len() == 0 { +		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) +		return nil +	} +	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} +	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { +		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) +		return nil +	} +	// Special summary/histogram treatment. Don't add 'quantile' and 'le' +	// labels to 'real' labels. +	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && +		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { +		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) +	} +	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { +		return nil // Unexpected end of input. 
+	} +	if p.currentByte != '=' { +		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) +		return nil +	} +	return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { +	if p.skipBlankTab(); p.err != nil { +		return nil // Unexpected end of input. +	} +	if p.currentByte != '"' { +		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) +		return nil +	} +	if p.readTokenAsLabelValue(); p.err != nil { +		return nil +	} +	p.currentLabelPair.Value = proto.String(p.currentToken.String()) +	// Special treatment of summaries: +	// - Quantile labels are special, will result in dto.Quantile later. +	// - Other labels have to be added to currentLabels for signature calculation. +	if p.currentMF.GetType() == dto.MetricType_SUMMARY { +		if p.currentLabelPair.GetName() == model.QuantileLabel { +			if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { +				// Create a more helpful error message. +				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) +				return nil +			} +		} else { +			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() +		} +	} +	// Similar special treatment of histograms. +	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { +		if p.currentLabelPair.GetName() == model.BucketLabel { +			if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { +				// Create a more helpful error message. 
+				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) +				return nil +			} +		} else { +			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() +		} +	} +	if p.skipBlankTab(); p.err != nil { +		return nil // Unexpected end of input. +	} +	switch p.currentByte { +	case ',': +		return p.startLabelName + +	case '}': +		if p.skipBlankTab(); p.err != nil { +			return nil // Unexpected end of input. +		} +		return p.readingValue +	default: +		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) +		return nil +	} +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { +	// When we are here, we have read all the labels, so for the +	// special case of a summary/histogram, we can finally find out +	// if the metric already exists. +	if p.currentMF.GetType() == dto.MetricType_SUMMARY { +		signature := model.LabelsToSignature(p.currentLabels) +		if summary := p.summaries[signature]; summary != nil { +			p.currentMetric = summary +		} else { +			p.summaries[signature] = p.currentMetric +			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) +		} +	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { +		signature := model.LabelsToSignature(p.currentLabels) +		if histogram := p.histograms[signature]; histogram != nil { +			p.currentMetric = histogram +		} else { +			p.histograms[signature] = p.currentMetric +			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) +		} +	} else { +		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) +	} +	if p.readTokenUntilWhitespace(); p.err != nil { +		return nil // Unexpected end of input. +	} +	value, err := strconv.ParseFloat(p.currentToken.String(), 64) +	if err != nil { +		// Create a more helpful error message. 
+		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) +		return nil +	} +	switch p.currentMF.GetType() { +	case dto.MetricType_COUNTER: +		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} +	case dto.MetricType_GAUGE: +		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} +	case dto.MetricType_UNTYPED: +		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} +	case dto.MetricType_SUMMARY: +		// *sigh* +		if p.currentMetric.Summary == nil { +			p.currentMetric.Summary = &dto.Summary{} +		} +		switch { +		case p.currentIsSummaryCount: +			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) +		case p.currentIsSummarySum: +			p.currentMetric.Summary.SampleSum = proto.Float64(value) +		case !math.IsNaN(p.currentQuantile): +			p.currentMetric.Summary.Quantile = append( +				p.currentMetric.Summary.Quantile, +				&dto.Quantile{ +					Quantile: proto.Float64(p.currentQuantile), +					Value:    proto.Float64(value), +				}, +			) +		} +	case dto.MetricType_HISTOGRAM: +		// *sigh* +		if p.currentMetric.Histogram == nil { +			p.currentMetric.Histogram = &dto.Histogram{} +		} +		switch { +		case p.currentIsHistogramCount: +			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) +		case p.currentIsHistogramSum: +			p.currentMetric.Histogram.SampleSum = proto.Float64(value) +		case !math.IsNaN(p.currentBucket): +			p.currentMetric.Histogram.Bucket = append( +				p.currentMetric.Histogram.Bucket, +				&dto.Bucket{ +					UpperBound:      proto.Float64(p.currentBucket), +					CumulativeCount: proto.Uint64(uint64(value)), +				}, +			) +		} +	default: +		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) +	} +	if p.currentByte == '\n' { +		return p.startOfLine +	} +	return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). 
+func (p *TextParser) startTimestamp() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMetric.TimestampMs = proto.Int64(timestamp)
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken.  The
+// first byte considered is the byte already read (now in p.currentByte).  The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken.  The first
+// byte considered is the byte already read (now in p.currentByte).  The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
All +// other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { +	p.currentToken.Reset() +	escaped := false +	for p.err == nil { +		if recognizeEscapeSequence && escaped { +			switch p.currentByte { +			case '\\': +				p.currentToken.WriteByte(p.currentByte) +			case 'n': +				p.currentToken.WriteByte('\n') +			default: +				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) +				return +			} +			escaped = false +		} else { +			switch p.currentByte { +			case '\n': +				return +			case '\\': +				escaped = true +			default: +				p.currentToken.WriteByte(p.currentByte) +			} +		} +		p.currentByte, p.err = p.buf.ReadByte() +	} +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsMetricName() { +	p.currentToken.Reset() +	if !isValidMetricNameStart(p.currentByte) { +		return +	} +	for { +		p.currentToken.WriteByte(p.currentByte) +		p.currentByte, p.err = p.buf.ReadByte() +		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { +			return +		} +	} +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { +	p.currentToken.Reset() +	if !isValidLabelNameStart(p.currentByte) { +		return +	} +	for { +		p.currentToken.WriteByte(p.currentByte) +		p.currentByte, p.err = p.buf.ReadByte() +		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { +			return +		} +	} +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. 
+// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { +	p.currentToken.Reset() +	escaped := false +	for { +		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { +			return +		} +		if escaped { +			switch p.currentByte { +			case '"', '\\': +				p.currentToken.WriteByte(p.currentByte) +			case 'n': +				p.currentToken.WriteByte('\n') +			default: +				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) +				return +			} +			escaped = false +			continue +		} +		switch p.currentByte { +		case '"': +			return +		case '\n': +			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) +			return +		case '\\': +			escaped = true +		default: +			p.currentToken.WriteByte(p.currentByte) +		} +	} +} + +func (p *TextParser) setOrCreateCurrentMF() { +	p.currentIsSummaryCount = false +	p.currentIsSummarySum = false +	p.currentIsHistogramCount = false +	p.currentIsHistogramSum = false +	name := p.currentToken.String() +	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { +		return +	} +	// Try out if this is a _sum or _count for a summary/histogram. 
+	summaryName := summaryMetricName(name) +	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { +		if p.currentMF.GetType() == dto.MetricType_SUMMARY { +			if isCount(name) { +				p.currentIsSummaryCount = true +			} +			if isSum(name) { +				p.currentIsSummarySum = true +			} +			return +		} +	} +	histogramName := histogramMetricName(name) +	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { +		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { +			if isCount(name) { +				p.currentIsHistogramCount = true +			} +			if isSum(name) { +				p.currentIsHistogramSum = true +			} +			return +		} +	} +	p.currentMF = &dto.MetricFamily{Name: proto.String(name)} +	p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { +	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { +	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { +	return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { +	return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { +	return b == ' ' || b == '\t' +} + +func isCount(name string) bool { +	return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { +	return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { +	return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { +	switch { +	case isCount(name): +		return name[:len(name)-6] +	case isSum(name): +		return name[:len(name)-4] +	default: +		return name +	} +} + +func histogramMetricName(name string) string { +	switch { +	case isCount(name): +		return name[:len(name)-6] +	case isSum(name): +		return name[:len(name)-4] +	case isBucket(name): +		return name[:len(name)-7] +	default: +		return name +	} +} diff 
--git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 0000000..7723656 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +    Redistributions of source code must retain the above copyright +    notice, this list of conditions and the following disclaimer. + +    Redistributions in binary form must reproduce the above copyright +    notice, this list of conditions and the following disclaimer in +    the documentation and/or other materials provided with the +    distribution. + +    Neither the name of the Open Knowledge Foundation Ltd. nor the +    names of its contributors may be used to endorse or promote +    products derived from this software without specific prior written +    permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { +    Type, SubType string +    Q             float32 +    Params        map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + +	.hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 0000000..648b38c --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +    Redistributions of source code must retain the above copyright +    notice, this list of conditions and the following disclaimer. 
+ +    Redistributions in binary form must reproduce the above copyright +    notice, this list of conditions and the following disclaimer in +    the documentation and/or other materials provided with the +    distribution. + +    Neither the name of the Open Knowledge Foundation Ltd. nor the +    names of its contributors may be used to endorse or promote +    products derived from this software without specific prior written +    permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( +	"sort" +	"strconv" +	"strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { +	Type, SubType string +	Q             float64 +	Params        map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { +	slice := []Accept(accept) +	return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { +	slice := []Accept(accept) +	ai, aj := slice[i], slice[j] +	if ai.Q > aj.Q { +		return true +	} +	if ai.Type != "*" && aj.Type == "*" { +		return true +	} +	if ai.SubType != "*" && aj.SubType == "*" { +		return true +	} +	return false +} + +func (accept accept_slice) Swap(i, j int) { +	slice := []Accept(accept) +	slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { +	parts := strings.Split(header, ",") +	accept = make([]Accept, 0, len(parts)) +	for _, part := range parts { +		part := strings.Trim(part, " ") + +		a := Accept{} +		a.Params = make(map[string]string) +		a.Q = 1.0 + +		mrp := strings.Split(part, ";") + +		media_range := mrp[0] +		sp := strings.Split(media_range, "/") +		a.Type = strings.Trim(sp[0], " ") + +		switch { +		case len(sp) == 1 && a.Type == "*": +			a.SubType = "*" +		case len(sp) == 2: +			a.SubType = strings.Trim(sp[1], " ") +		default: +			continue +		} + +		if len(mrp) == 1 { +			accept = append(accept, a) +			continue +		} + +		for _, param := range mrp[1:] { +			sp := strings.SplitN(param, "=", 2) +			if len(sp) != 2 { +				continue +			} +			token := strings.Trim(sp[0], " ") +			if token == "q" { +				a.Q, _ = strconv.ParseFloat(sp[1], 32) +			} else { +				a.Params[token] = strings.Trim(sp[1], " ") +			} +		} + +		accept = append(accept, a) +	} + +	slice := accept_slice(accept) +	sort.Sort(slice) + +	return +} + +// Negotiate the most appropriate 
content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { +	asp := make([][]string, 0, len(alternatives)) +	for _, ctype := range alternatives { +		asp = append(asp, strings.SplitN(ctype, "/", 2)) +	} +	for _, clause := range ParseAccept(header) { +		for i, ctsp := range asp { +			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { +				content_type = alternatives[i] +				return +			} +			if clause.Type == ctsp[0] && clause.SubType == "*" { +				content_type = alternatives[i] +				return +			} +			if clause.Type == "*" && clause.SubType == "*" { +				content_type = alternatives[i] +				return +			} +		} +	} +	return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 0000000..35e739c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"fmt" +	"time" +) + +type AlertStatus string + +const ( +	AlertFiring   AlertStatus = "firing" +	AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { +	// Label value pairs for purpose of aggregation, matching, and disposition +	// dispatching. This must minimally include an "alertname" label. 
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	if a.Resolved() {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate checks whether the alert data is inconsistent. 
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %s", err)
+	}
+	if len(a.Labels) == 0 {
+		return fmt.Errorf("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %s", err)
+	}
+	return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+	if as.HasFiring() {
+		return AlertFiring
+	}
+	return AlertResolved
+}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"fmt" +	"strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { +	num, err := strconv.ParseUint(s, 16, 64) +	return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { +	num, err := strconv.ParseUint(s, 16, 64) +	if err != nil { +		return 0, err +	} +	return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { +	return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { +	return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { +	return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { +	f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). 
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for k := range s {
+		if _, ok := o[k]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+	myLength, otherLength := len(s), len(o)
+	if myLength == 0 || otherLength == 0 {
+		return FingerprintSet{}
+	}
+
+	subSet := s
+	superSet := o
+
+	if otherLength < myLength {
+		subSet = o
+		superSet = s
+	}
+
+	out := FingerprintSet{}
+
+	for k := range subSet {
+		if _, ok := superSet[k]; ok {
+			out[k] = struct{}{}
+		}
+	}
+
+	return out
+}
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..038fc1c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash. 
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..41051a0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target. 
+	AddressLabel = "__address__" + +	// MetricsPathLabel is the name of the label that holds the path on which to +	// scrape a target. +	MetricsPathLabel = "__metrics_path__" + +	// ReservedLabelPrefix is a prefix which is not legal in user-supplied +	// label names. +	ReservedLabelPrefix = "__" + +	// MetaLabelPrefix is a prefix for labels that provide meta information. +	// Labels with this prefix are used for intermediate label processing and +	// will not be attached to time series. +	MetaLabelPrefix = "__meta_" + +	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling. +	// Labels with this prefix are used for intermediate label processing and +	// will not be attached to time series. This is reserved for use in +	// Prometheus configuration files by users. +	TmpLabelPrefix = "__tmp_" + +	// ParamLabelPrefix is a prefix for labels that provide URL parameters +	// used to scrape a target. +	ParamLabelPrefix = "__param_" + +	// JobLabel is the label name indicating the job from which a timeseries +	// was scraped. +	JobLabel = "job" + +	// InstanceLabel is the label name used for the instance label. +	InstanceLabel = "instance" + +	// BucketLabel is used for the label that defines the upper bound of a +	// bucket of a histogram ("le" -> "less or equal"). +	BucketLabel = "le" + +	// QuantileLabel is used for the label that defines the quantile in a +	// summary. +	QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric.  It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. 
This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
+	return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is a valid UTF8.
+func (lv LabelValue) IsValid() bool {
+	return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface. 
+type LabelValues []LabelValue + +func (l LabelValues) Len() int { +	return len(l) +} + +func (l LabelValues) Less(i, j int) bool { +	return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { +	l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { +	Name  LabelName +	Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { +	return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { +	switch { +	case l[i].Name > l[j].Name: +		return false +	case l[i].Name < l[j].Name: +		return true +	case l[i].Value > l[j].Value: +		return false +	case l[i].Value < l[j].Value: +		return true +	default: +		return false +	} +} + +func (l LabelPairs) Swap(i, j int) { +	l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 0000000..6eda08a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"encoding/json" +	"fmt" +	"sort" +	"strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs.  
The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not.  All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { +	for ln, lv := range ls { +		if !ln.IsValid() { +			return fmt.Errorf("invalid name %q", ln) +		} +		if !lv.IsValid() { +			return fmt.Errorf("invalid value %q", lv) +		} +	} +	return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { +	if len(ls) != len(o) { +		return false +	} +	for ln, lv := range ls { +		olv, ok := o[ln] +		if !ok { +			return false +		} +		if olv != lv { +			return false +		} +	} +	return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { +	if len(ls) < len(o) { +		return true +	} +	if len(ls) > len(o) { +		return false +	} + +	lns := make(LabelNames, 0, len(ls)+len(o)) +	for ln := range ls { +		lns = append(lns, ln) +	} +	for ln := range o { +		lns = append(lns, ln) +	} +	// It's probably not worth it to de-dup lns. 
+	sort.Sort(lns) +	for _, ln := range lns { +		mlv, ok := ls[ln] +		if !ok { +			return true +		} +		olv, ok := o[ln] +		if !ok { +			return false +		} +		if mlv < olv { +			return true +		} +		if mlv > olv { +			return false +		} +	} +	return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { +	lsn := make(LabelSet, len(ls)) +	for ln, lv := range ls { +		lsn[ln] = lv +	} +	return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { +	result := make(LabelSet, len(l)) + +	for k, v := range l { +		result[k] = v +	} + +	for k, v := range other { +		result[k] = v +	} + +	return result +} + +func (l LabelSet) String() string { +	lstrs := make([]string, 0, len(l)) +	for l, v := range l { +		lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) +	} + +	sort.Strings(lstrs) +	return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { +	return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { +	return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { +	var m map[LabelName]LabelValue +	if err := json.Unmarshal(b, &m); err != nil { +		return err +	} +	// encoding/json only unmarshals maps of the form map[string]T. It treats +	// LabelName as a string and does not call its UnmarshalJSON method. +	// Thus, we have to replicate the behavior here. 
+	for ln := range m { +		if !ln.IsValid() { +			return fmt.Errorf("%q is not a valid label name", ln) +		} +	} +	*l = LabelSet(m) +	return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 0000000..f725090 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"fmt" +	"regexp" +	"sort" +	"strings" +) + +var ( +	separator = []byte{0} +	// MetricNameRE is a regular expression matching valid metric +	// names. Note that the IsValidMetricName function performs the same +	// check but faster than a match with this regular expression. +	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { +	return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { +	return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { +	clone := make(Metric, len(m)) +	for k, v := range m { +		clone[k] = v +	} +	return clone +} + +func (m Metric) String() string { +	metricName, hasName := m[MetricNameLabel] +	numLabels := len(m) - 1 +	if !hasName { +		numLabels = len(m) +	} +	labelStrings := make([]string, 0, numLabels) +	for label, value := range m { +		if label != MetricNameLabel { +			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) +		} +	} + +	switch numLabels { +	case 0: +		if hasName { +			return string(metricName) +		} +		return "{}" +	default: +		sort.Strings(labelStrings) +		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) +	} +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { +	return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { +	return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { +	if len(n) == 0 { +		return false +	} +	for i, b := range n { +		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { +			return false +		} +	} +	return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 0000000..a7b9691 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 0000000..8762b13 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( +	// cache the signature of an empty label set. +	emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. 
(Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { +	if len(labels) == 0 { +		return emptyLabelSignature +	} + +	labelNames := make([]string, 0, len(labels)) +	for labelName := range labels { +		labelNames = append(labelNames, labelName) +	} +	sort.Strings(labelNames) + +	sum := hashNew() +	for _, labelName := range labelNames { +		sum = hashAdd(sum, labelName) +		sum = hashAddByte(sum, SeparatorByte) +		sum = hashAdd(sum, labels[labelName]) +		sum = hashAddByte(sum, SeparatorByte) +	} +	return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { +	if len(ls) == 0 { +		return Fingerprint(emptyLabelSignature) +	} + +	labelNames := make(LabelNames, 0, len(ls)) +	for labelName := range ls { +		labelNames = append(labelNames, labelName) +	} +	sort.Sort(labelNames) + +	sum := hashNew() +	for _, labelName := range labelNames { +		sum = hashAdd(sum, string(labelName)) +		sum = hashAddByte(sum, SeparatorByte) +		sum = hashAdd(sum, string(ls[labelName])) +		sum = hashAddByte(sum, SeparatorByte) +	} +	return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. 
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint { +	if len(ls) == 0 { +		return Fingerprint(emptyLabelSignature) +	} + +	var result uint64 +	for labelName, labelValue := range ls { +		sum := hashNew() +		sum = hashAdd(sum, string(labelName)) +		sum = hashAddByte(sum, SeparatorByte) +		sum = hashAdd(sum, string(labelValue)) +		result ^= sum +	} +	return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { +	if len(labels) == 0 { +		return emptyLabelSignature +	} + +	sort.Sort(LabelNames(labels)) + +	sum := hashNew() +	for _, label := range labels { +		sum = hashAdd(sum, string(label)) +		sum = hashAddByte(sum, SeparatorByte) +		sum = hashAdd(sum, string(m[label])) +		sum = hashAddByte(sum, SeparatorByte) +	} +	return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { +	if len(m) == 0 { +		return emptyLabelSignature +	} + +	labelNames := make(LabelNames, 0, len(m)) +	for labelName := range m { +		if _, exclude := labels[labelName]; !exclude { +			labelNames = append(labelNames, labelName) +		} +	} +	if len(labelNames) == 0 { +		return emptyLabelSignature +	} +	sort.Sort(labelNames) + +	sum := hashNew() +	for _, labelName := range labelNames { +		sum = hashAdd(sum, string(labelName)) +		sum = hashAddByte(sum, SeparatorByte) +		sum = hashAdd(sum, string(m[labelName])) +		sum = hashAddByte(sum, SeparatorByte) +	} +	return sum +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 0000000..7538e29 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"encoding/json" +	"fmt" +	"regexp" +	"time" +) + +// Matcher describes a matches the value of a given label. 
+type Matcher struct { +	Name    LabelName `json:"name"` +	Value   string    `json:"value"` +	IsRegex bool      `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { +	type plain Matcher +	if err := json.Unmarshal(b, (*plain)(m)); err != nil { +		return err +	} + +	if len(m.Name) == 0 { +		return fmt.Errorf("label name in matcher must not be empty") +	} +	if m.IsRegex { +		if _, err := regexp.Compile(m.Value); err != nil { +			return err +		} +	} +	return nil +} + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { +	if !m.Name.IsValid() { +		return fmt.Errorf("invalid name %q", m.Name) +	} +	if m.IsRegex { +		if _, err := regexp.Compile(m.Value); err != nil { +			return fmt.Errorf("invalid regular expression %q", m.Value) +		} +	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { +		return fmt.Errorf("invalid value %q", m.Value) +	} +	return nil +} + +// Silence defines the representation of a silence definiton +// in the Prometheus eco-system. +type Silence struct { +	ID uint64 `json:"id,omitempty"` + +	Matchers []*Matcher `json:"matchers"` + +	StartsAt time.Time `json:"startsAt"` +	EndsAt   time.Time `json:"endsAt"` + +	CreatedAt time.Time `json:"createdAt,omitempty"` +	CreatedBy string    `json:"createdBy"` +	Comment   string    `json:"comment,omitempty"` +} + +// Validate returns true iff all fields of the silence have valid values. 
+func (s *Silence) Validate() error { +	if len(s.Matchers) == 0 { +		return fmt.Errorf("at least one matcher required") +	} +	for _, m := range s.Matchers { +		if err := m.Validate(); err != nil { +			return fmt.Errorf("invalid matcher: %s", err) +		} +	} +	if s.StartsAt.IsZero() { +		return fmt.Errorf("start time missing") +	} +	if s.EndsAt.IsZero() { +		return fmt.Errorf("end time missing") +	} +	if s.EndsAt.Before(s.StartsAt) { +		return fmt.Errorf("start time must be before end time") +	} +	if s.CreatedBy == "" { +		return fmt.Errorf("creator information missing") +	} +	if s.Comment == "" { +		return fmt.Errorf("comment missing") +	} +	if s.CreatedAt.IsZero() { +		return fmt.Errorf("creation timestamp missing") +	} +	return nil +} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 0000000..548968a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,249 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"fmt" +	"math" +	"regexp" +	"strconv" +	"strings" +	"time" +) + +const ( +	// MinimumTick is the minimum supported time resolution. This has to be +	// at least time.Second in order for the code below to work. +	minimumTick = time.Millisecond +	// second is the Time duration equivalent to one second. 
+	second = int64(time.Second / minimumTick) +	// The number of nanoseconds per minimum tick. +	nanosPerTick = int64(minimumTick / time.Nanosecond) + +	// Earliest is the earliest Time representable. Handy for +	// initializing a high watermark. +	Earliest = Time(math.MinInt64) +	// Latest is the latest Time representable. Handy for initializing +	// a low watermark. +	Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes and interval between two timestamps. +type Interval struct { +	Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { +	return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { +	return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { +	return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { +	return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { +	return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { +	return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { +	return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { +	return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { +	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. 
+func (t Time) Unix() int64 { +	return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { +	return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { +	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { +	return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { +	p := strings.Split(string(b), ".") +	switch len(p) { +	case 1: +		v, err := strconv.ParseInt(string(p[0]), 10, 64) +		if err != nil { +			return err +		} +		*t = Time(v * second) + +	case 2: +		v, err := strconv.ParseInt(string(p[0]), 10, 64) +		if err != nil { +			return err +		} +		v *= second + +		prec := dotPrecision - len(p[1]) +		if prec < 0 { +			p[1] = p[1][:dotPrecision] +		} else if prec > 0 { +			p[1] = p[1] + strings.Repeat("0", prec) +		} + +		va, err := strconv.ParseInt(p[1], 10, 32) +		if err != nil { +			return err +		} + +		*t = Time(v + va) + +	default: +		return fmt.Errorf("invalid time %q", string(b)) +	} +	return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// StringToDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. 
+func ParseDuration(durationStr string) (Duration, error) { +	matches := durationRE.FindStringSubmatch(durationStr) +	if len(matches) != 3 { +		return 0, fmt.Errorf("not a valid duration string: %q", durationStr) +	} +	var ( +		n, _ = strconv.Atoi(matches[1]) +		dur  = time.Duration(n) * time.Millisecond +	) +	switch unit := matches[2]; unit { +	case "y": +		dur *= 1000 * 60 * 60 * 24 * 365 +	case "w": +		dur *= 1000 * 60 * 60 * 24 * 7 +	case "d": +		dur *= 1000 * 60 * 60 * 24 +	case "h": +		dur *= 1000 * 60 * 60 +	case "m": +		dur *= 1000 * 60 +	case "s": +		dur *= 1000 +	case "ms": +		// Value already correct +	default: +		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) +	} +	return Duration(dur), nil +} + +func (d Duration) String() string { +	var ( +		ms   = int64(time.Duration(d) / time.Millisecond) +		unit = "ms" +	) +	factors := map[string]int64{ +		"y":  1000 * 60 * 60 * 24 * 365, +		"w":  1000 * 60 * 60 * 24 * 7, +		"d":  1000 * 60 * 60 * 24, +		"h":  1000 * 60 * 60, +		"m":  1000 * 60, +		"s":  1000, +		"ms": 1, +	} + +	switch int64(0) { +	case ms % factors["y"]: +		unit = "y" +	case ms % factors["w"]: +		unit = "w" +	case ms % factors["d"]: +		unit = "d" +	case ms % factors["h"]: +		unit = "h" +	case ms % factors["m"]: +		unit = "m" +	case ms % factors["s"]: +		unit = "s" +	} +	return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { +	return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { +	var s string +	if err := unmarshal(&s); err != nil { +		return err +	} +	dur, err := ParseDuration(s) +	if err != nil { +		return err +	} +	*d = dur +	return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 0000000..7728aba --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,419 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( +	"encoding/json" +	"fmt" +	"math" +	"sort" +	"strconv" +	"strings" +) + +var ( +	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +	// non-existing sample pair. It is a SamplePair with timestamp Earliest and +	// value 0.0. Note that the natural zero value of SamplePair has a timestamp +	// of 0, which is possible to appear in a real SamplePair and thus not +	// suitable to signal a non-existing SamplePair. +	ZeroSamplePair = SamplePair{Timestamp: Earliest} + +	// ZeroSample is the pseudo zero-value of Sample used to signal a +	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +	// and metric nil. Note that the natural zero value of Sample has a timestamp +	// of 0, which is possible to appear in a real Sample and thus not suitable +	// to signal a non-existing Sample. 
+	ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { +	return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { +	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { +		return fmt.Errorf("sample value must be a quoted string") +	} +	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) +	if err != nil { +		return err +	} +	*v = SampleValue(f) +	return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { +	if v == o { +		return true +	} +	return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { +	return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { +	Timestamp Time +	Value     SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { +	t, err := json.Marshal(s.Timestamp) +	if err != nil { +		return nil, err +	} +	v, err := json.Marshal(s.Value) +	if err != nil { +		return nil, err +	} +	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { +	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} +	return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. 
+func (s *SamplePair) Equal(o *SamplePair) bool { +	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { +	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { +	Metric    Metric      `json:"metric"` +	Value     SampleValue `json:"value"` +	Timestamp Time        `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// sematics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { +	if s == o { +		return true +	} + +	if !s.Metric.Equal(o.Metric) { +		return false +	} +	if !s.Timestamp.Equal(o.Timestamp) { +		return false +	} +	if s.Value.Equal(o.Value) { +		return false +	} + +	return true +} + +func (s Sample) String() string { +	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ +		Timestamp: s.Timestamp, +		Value:     s.Value, +	}) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { +	v := struct { +		Metric Metric     `json:"metric"` +		Value  SamplePair `json:"value"` +	}{ +		Metric: s.Metric, +		Value: SamplePair{ +			Timestamp: s.Timestamp, +			Value:     s.Value, +		}, +	} + +	return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { +	v := struct { +		Metric Metric     `json:"metric"` +		Value  SamplePair `json:"value"` +	}{ +		Metric: s.Metric, +		Value: SamplePair{ +			Timestamp: s.Timestamp, +			Value:     s.Value, +		}, +	} + +	if err := json.Unmarshal(b, &v); err != nil { +		return err +	} + +	s.Metric = v.Metric +	s.Timestamp = v.Value.Timestamp +	s.Value = v.Value.Value + +	return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { +	return len(s) +} + +// Less compares first the metrics, then the timestamp. 
+func (s Samples) Less(i, j int) bool { +	switch { +	case s[i].Metric.Before(s[j].Metric): +		return true +	case s[j].Metric.Before(s[i].Metric): +		return false +	case s[i].Timestamp.Before(s[j].Timestamp): +		return true +	default: +		return false +	} +} + +func (s Samples) Swap(i, j int) { +	s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { +	if len(s) != len(o) { +		return false +	} + +	for i, sample := range s { +		if !sample.Equal(o[i]) { +			return false +		} +	} +	return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { +	Metric Metric       `json:"metric"` +	Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { +	vals := make([]string, len(ss.Values)) +	for i, v := range ss.Values { +		vals[i] = v.String() +	} +	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { +	Type() ValueType +	String() string +} + +func (Matrix) Type() ValueType  { return ValMatrix } +func (Vector) Type() ValueType  { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( +	ValNone ValueType = iota +	ValScalar +	ValVector +	ValMatrix +	ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { +	return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { +	var s string +	if err := json.Unmarshal(b, &s); err != nil { +		return err +	} +	switch s { +	case "<ValNone>": +		*et = ValNone +	case "scalar": +		*et = ValScalar +	case "vector": +		*et = ValVector +	case "matrix": +		*et = ValMatrix +	case "string": +		*et = ValString +	default: +		return fmt.Errorf("unknown value type %q", s) +	} +	return nil +} + +func (e ValueType) String() string { +	switch e { +	case ValNone: +		return "<ValNone>" +	case ValScalar: +		return "scalar" +	case ValVector: +		return "vector" +	case ValMatrix: +		return "matrix" +	case ValString: +		return "string" +	} +	panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { +	Value     SampleValue `json:"value"` +	Timestamp Time        `json:"timestamp"` +} + +func (s Scalar) String() string { +	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { +	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) +	return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { +	var f string +	v := [...]interface{}{&s.Timestamp, &f} + +	if err := json.Unmarshal(b, &v); err != nil { +		return err +	} + +	value, err := strconv.ParseFloat(f, 64) +	if err != nil { +		return fmt.Errorf("error parsing sample value: %s", err) +	} +	s.Value = SampleValue(value) +	return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { +	Value     string `json:"value"` +	Timestamp Time   `json:"timestamp"` +} + +func (s *String) String() string { +	return s.Value +} + +// MarshalJSON implements json.Marshaler. 
+func (s String) MarshalJSON() ([]byte, error) { +	return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { +	v := [...]interface{}{&s.Timestamp, &s.Value} +	return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { +	entries := make([]string, len(vec)) +	for i, s := range vec { +		entries[i] = s.String() +	} +	return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int      { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { +	switch { +	case vec[i].Metric.Before(vec[j].Metric): +		return true +	case vec[j].Metric.Before(vec[i].Metric): +		return false +	case vec[i].Timestamp.Before(vec[j].Timestamp): +		return true +	default: +		return false +	} +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { +	if len(vec) != len(o) { +		return false +	} + +	for i, sample := range vec { +		if !sample.Equal(o[i]) { +			return false +		} +	} +	return true +} + +// Matrix is a list of time series. 
+type Matrix []*SampleStream + +func (m Matrix) Len() int           { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { +	matCp := make(Matrix, len(mat)) +	copy(matCp, mat) +	sort.Sort(matCp) + +	strs := make([]string, len(matCp)) + +	for i, ss := range matCp { +		strs[i] = ss.String() +	} + +	return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md new file mode 100644 index 0000000..d558635 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/AUTHORS.md @@ -0,0 +1,21 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Tobias Schmidt <ts@soundcloud.com> + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Armen Baghumian <abaghumian@noggin.com.au> +* Bjoern Rabenstein <beorn@soundcloud.com> +* David Cournapeau <cournape@gmail.com> +* Ji-Hoon, Seol <jihoon.seol@gmail.com> +* Jonas Große Sundrup <cherti@letopolis.de> +* Julius Volz <julius.volz@gmail.com> +* Matt Layher <mdlayher@gmail.com> +* Matthias Rampke <mr@soundcloud.com> +* Nicky Gerritsen <nicky@streamone.nl> +* Rémi Audebert <contact@halfr.net> +* Tobias Schmidt <tobidt@gmail.com> diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 0000000..5705f0f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull +  request, addressing (with `@...`) one or more of the maintainers +  (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. 
+ +* If you plan to do something more involved, first discuss your ideas +  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). +  This will avoid unnecessary work and surely give you and us a good deal +  of inspiration. + +* Relevant coding style guidelines are the [Go Code Review +  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) +  and the _Formatting and style_ section of Peter Bourgon's [Go: Best +  Practices for Production +  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ +                                 Apache License +                           Version 2.0, January 2004 +                        http://www.apache.org/licenses/ + +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +   1. Definitions. + +      "License" shall mean the terms and conditions for use, reproduction, +      and distribution as defined by Sections 1 through 9 of this document. + +      "Licensor" shall mean the copyright owner or entity authorized by +      the copyright owner that is granting the License. + +      "Legal Entity" shall mean the union of the acting entity and all +      other entities that control, are controlled by, or are under common +      control with that entity. For the purposes of this definition, +      "control" means (i) the power, direct or indirect, to cause the +      direction or management of such entity, whether by contract or +      otherwise, or (ii) ownership of fifty percent (50%) or more of the +      outstanding shares, or (iii) beneficial ownership of such entity. + +      "You" (or "Your") shall mean an individual or Legal Entity +      exercising permissions granted by this License. 
+ +      "Source" form shall mean the preferred form for making modifications, +      including but not limited to software source code, documentation +      source, and configuration files. + +      "Object" form shall mean any form resulting from mechanical +      transformation or translation of a Source form, including but +      not limited to compiled object code, generated documentation, +      and conversions to other media types. + +      "Work" shall mean the work of authorship, whether in Source or +      Object form, made available under the License, as indicated by a +      copyright notice that is included in or attached to the work +      (an example is provided in the Appendix below). + +      "Derivative Works" shall mean any work, whether in Source or Object +      form, that is based on (or derived from) the Work and for which the +      editorial revisions, annotations, elaborations, or other modifications +      represent, as a whole, an original work of authorship. For the purposes +      of this License, Derivative Works shall not include works that remain +      separable from, or merely link (or bind by name) to the interfaces of, +      the Work and Derivative Works thereof. + +      "Contribution" shall mean any work of authorship, including +      the original version of the Work and any modifications or additions +      to that Work or Derivative Works thereof, that is intentionally +      submitted to Licensor for inclusion in the Work by the copyright owner +      or by an individual or Legal Entity authorized to submit on behalf of +      the copyright owner. 
For the purposes of this definition, "submitted" +      means any form of electronic, verbal, or written communication sent +      to the Licensor or its representatives, including but not limited to +      communication on electronic mailing lists, source code control systems, +      and issue tracking systems that are managed by, or on behalf of, the +      Licensor for the purpose of discussing and improving the Work, but +      excluding communication that is conspicuously marked or otherwise +      designated in writing by the copyright owner as "Not a Contribution." + +      "Contributor" shall mean Licensor and any individual or Legal Entity +      on behalf of whom a Contribution has been received by Licensor and +      subsequently incorporated within the Work. + +   2. Grant of Copyright License. Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      copyright license to reproduce, prepare Derivative Works of, +      publicly display, publicly perform, sublicense, and distribute the +      Work and such Derivative Works in Source or Object form. + +   3. Grant of Patent License. Subject to the terms and conditions of +      this License, each Contributor hereby grants to You a perpetual, +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable +      (except as stated in this section) patent license to make, have made, +      use, offer to sell, sell, import, and otherwise transfer the Work, +      where such license applies only to those patent claims licensable +      by such Contributor that are necessarily infringed by their +      Contribution(s) alone or by combination of their Contribution(s) +      with the Work to which such Contribution(s) was submitted. 
If You +      institute patent litigation against any entity (including a +      cross-claim or counterclaim in a lawsuit) alleging that the Work +      or a Contribution incorporated within the Work constitutes direct +      or contributory patent infringement, then any patent licenses +      granted to You under this License for that Work shall terminate +      as of the date such litigation is filed. + +   4. Redistribution. You may reproduce and distribute copies of the +      Work or Derivative Works thereof in any medium, with or without +      modifications, and in Source or Object form, provided that You +      meet the following conditions: + +      (a) You must give any other recipients of the Work or +          Derivative Works a copy of this License; and + +      (b) You must cause any modified files to carry prominent notices +          stating that You changed the files; and + +      (c) You must retain, in the Source form of any Derivative Works +          that You distribute, all copyright, patent, trademark, and +          attribution notices from the Source form of the Work, +          excluding those notices that do not pertain to any part of +          the Derivative Works; and + +      (d) If the Work includes a "NOTICE" text file as part of its +          distribution, then any Derivative Works that You distribute must +          include a readable copy of the attribution notices contained +          within such NOTICE file, excluding those notices that do not +          pertain to any part of the Derivative Works, in at least one +          of the following places: within a NOTICE text file distributed +          as part of the Derivative Works; within the Source form or +          documentation, if provided along with the Derivative Works; or, +          within a display generated by the Derivative Works, if and +          wherever such third-party notices normally appear. 
The contents +          of the NOTICE file are for informational purposes only and +          do not modify the License. You may add Your own attribution +          notices within Derivative Works that You distribute, alongside +          or as an addendum to the NOTICE text from the Work, provided +          that such additional attribution notices cannot be construed +          as modifying the License. + +      You may add Your own copyright statement to Your modifications and +      may provide additional or different license terms and conditions +      for use, reproduction, or distribution of Your modifications, or +      for any such Derivative Works as a whole, provided Your use, +      reproduction, and distribution of the Work otherwise complies with +      the conditions stated in this License. + +   5. Submission of Contributions. Unless You explicitly state otherwise, +      any Contribution intentionally submitted for inclusion in the Work +      by You to the Licensor shall be under the terms and conditions of +      this License, without any additional terms or conditions. +      Notwithstanding the above, nothing herein shall supersede or modify +      the terms of any separate license agreement you may have executed +      with Licensor regarding such Contributions. + +   6. Trademarks. This License does not grant permission to use the trade +      names, trademarks, service marks, or product names of the Licensor, +      except as required for reasonable and customary use in describing the +      origin of the Work and reproducing the content of the NOTICE file. + +   7. Disclaimer of Warranty. 
Unless required by applicable law or +      agreed to in writing, Licensor provides the Work (and each +      Contributor provides its Contributions) on an "AS IS" BASIS, +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +      implied, including, without limitation, any warranties or conditions +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +      PARTICULAR PURPOSE. You are solely responsible for determining the +      appropriateness of using or redistributing the Work and assume any +      risks associated with Your exercise of permissions under this License. + +   8. Limitation of Liability. In no event and under no legal theory, +      whether in tort (including negligence), contract, or otherwise, +      unless required by applicable law (such as deliberate and grossly +      negligent acts) or agreed to in writing, shall any Contributor be +      liable to You for damages, including any direct, indirect, special, +      incidental, or consequential damages of any character arising as a +      result of this License or out of the use or inability to use the +      Work (including but not limited to damages for loss of goodwill, +      work stoppage, computer failure or malfunction, or any and all +      other commercial damages or losses), even if such Contributor +      has been advised of the possibility of such damages. + +   9. Accepting Warranty or Additional Liability. While redistributing +      the Work or Derivative Works thereof, You may choose to offer, +      and charge a fee for, acceptance of support, warranty, indemnity, +      or other liability obligations and/or rights consistent with this +      License. 
However, in accepting such obligations, You may act only +      on Your own behalf and on Your sole responsibility, not on behalf +      of any other Contributor, and only if You agree to indemnify, +      defend, and hold each Contributor harmless for any liability +      incurred by, or claims asserted against, such Contributor by reason +      of your accepting any such warranty or additional liability. + +   END OF TERMS AND CONDITIONS + +   APPENDIX: How to apply the Apache License to your work. + +      To apply the Apache License to your work, attach the following +      boilerplate notice, with the fields enclosed by brackets "[]" +      replaced with your own identifying information. (Don't include +      the brackets!)  The text should be enclosed in the appropriate +      comment syntax for the file format. We also recommend that a +      file or class name and description of purpose be included on the +      same "printed page" as the copyright notice for easier +      identification within third-party archives. + +   Copyright [yyyy] [name of copyright owner] + +   Licensed under the Apache License, Version 2.0 (the "License"); +   you may not use this file except in compliance with the License. +   You may obtain a copy of the License at + +       http://www.apache.org/licenses/LICENSE-2.0 + +   Unless required by applicable law or agreed to in writing, software +   distributed under the License is distributed on an "AS IS" BASIS, +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +   See the License for the specific language governing permissions and +   limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 0000000..c264a49 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,6 @@ +ci: +	! gofmt -l *.go | read nothing +	go vet +	go test -v ./... 
+	go get github.com/golang/lint/golint +	golint *.go diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 0000000..53c5e9a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 0000000..6e7ee6b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,10 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[](https://godoc.org/github.com/prometheus/procfs) +[](https://travis-ci.org/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 0000000..e2acd6d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +//    package main +// +//    import ( +//    	"fmt" +//    	"log" +// +//    	"github.com/prometheus/procfs" +//    ) +// +//    func main() { +//    	p, err := procfs.Self() +//    	if err != nil { +//    		log.Fatalf("could not get process: %s", err) +//    	} +// +//    	stat, err := p.NewStat() +//    	if err != nil { +//    		log.Fatalf("could not get process stat: %s", err) +//    	} +// +//    	fmt.Printf("command:  %s\n", stat.Comm) +//    	fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +//    	fmt.Printf("vsize:    %dB\n", stat.VirtualMemory()) +//    	fmt.Printf("rss:      %dB\n", stat.ResidentMemory()) +//    } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 0000000..49aaab0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,33 @@ +package procfs + +import ( +	"fmt" +	"os" +	"path" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { +	info, err := os.Stat(mountPoint) +	if err != nil { +		return "", fmt.Errorf("could not read %s: %s", mountPoint, err) +	} +	if !info.IsDir() { +		return "", fmt.Errorf("mount point %s is not a directory", mountPoint) +	} + +	return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { +	return path.Join(append([]string{string(fs)}, p...)...) 
+} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go new file mode 100644 index 0000000..e7012f7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -0,0 +1,224 @@ +package procfs + +import ( +	"bufio" +	"encoding/hex" +	"errors" +	"fmt" +	"io" +	"io/ioutil" +	"net" +	"os" +	"strconv" +	"strings" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { +	// Total count of connections. +	Connections uint64 +	// Total incoming packages processed. +	IncomingPackets uint64 +	// Total outgoing packages processed. +	OutgoingPackets uint64 +	// Total incoming traffic. +	IncomingBytes uint64 +	// Total outgoing traffic. +	OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { +	// The local (virtual) IP address. +	LocalAddress net.IP +	// The local (virtual) port. +	LocalPort uint16 +	// The transport protocol (TCP, UDP). +	Proto string +	// The remote (real) IP address. +	RemoteAddress net.IP +	// The remote (real) port. +	RemotePort uint16 +	// The current number of active connections for this virtual/real address pair. +	ActiveConn uint64 +	// The current number of inactive connections for this virtual/real address pair. +	InactConn uint64 +	// The current weight of this virtual/real address pair. +	Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { +	fs, err := NewFS(DefaultMountPoint) +	if err != nil { +		return IPVSStats{}, err +	} + +	return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. 
+func (fs FS) NewIPVSStats() (IPVSStats, error) { +	file, err := os.Open(fs.Path("net/ip_vs_stats")) +	if err != nil { +		return IPVSStats{}, err +	} +	defer file.Close() + +	return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { +	var ( +		statContent []byte +		statLines   []string +		statFields  []string +		stats       IPVSStats +	) + +	statContent, err := ioutil.ReadAll(file) +	if err != nil { +		return IPVSStats{}, err +	} + +	statLines = strings.SplitN(string(statContent), "\n", 4) +	if len(statLines) != 4 { +		return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") +	} + +	statFields = strings.Fields(statLines[2]) +	if len(statFields) != 5 { +		return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") +	} + +	stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) +	if err != nil { +		return IPVSStats{}, err +	} +	stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) +	if err != nil { +		return IPVSStats{}, err +	} +	stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) +	if err != nil { +		return IPVSStats{}, err +	} +	stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) +	if err != nil { +		return IPVSStats{}, err +	} +	stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) +	if err != nil { +		return IPVSStats{}, err +	} + +	return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { +	fs, err := NewFS(DefaultMountPoint) +	if err != nil { +		return []IPVSBackendStatus{}, err +	} + +	return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { +	file, err := os.Open(fs.Path("net/ip_vs")) +	if err != nil { +		return nil, err +	} +	defer file.Close() + +	return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { +	var ( +		status       []IPVSBackendStatus +		scanner      = bufio.NewScanner(file) +		proto        string +		localAddress net.IP +		localPort    uint16 +		err          error +	) + +	for scanner.Scan() { +		fields := strings.Fields(string(scanner.Text())) +		if len(fields) == 0 { +			continue +		} +		switch { +		case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": +			continue +		case fields[0] == "TCP" || fields[0] == "UDP": +			if len(fields) < 2 { +				continue +			} +			proto = fields[0] +			localAddress, localPort, err = parseIPPort(fields[1]) +			if err != nil { +				return nil, err +			} +		case fields[0] == "->": +			if len(fields) < 6 { +				continue +			} +			remoteAddress, remotePort, err := parseIPPort(fields[1]) +			if err != nil { +				return nil, err +			} +			weight, err := strconv.ParseUint(fields[3], 10, 64) +			if err != nil { +				return nil, err +			} +			activeConn, err := strconv.ParseUint(fields[4], 10, 64) +			if err != nil { +				return nil, err +			} +			inactConn, err := strconv.ParseUint(fields[5], 10, 64) +			if err != nil { +				return nil, err +			} +			status = append(status, IPVSBackendStatus{ +				LocalAddress:  localAddress, +				LocalPort:     localPort, +				RemoteAddress: remoteAddress, +				RemotePort:    remotePort, +				Proto:         proto, +				Weight:        weight, +				ActiveConn:    activeConn, +				InactConn:     inactConn, +			}) +		} +	} +	return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { +	tmp := strings.SplitN(s, ":", 2) + +	if len(tmp) != 2 { +		return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) +	} + +	if len(tmp[0]) != 8 && len(tmp[0]) != 32 { +		return nil, 
0, fmt.Errorf("invalid IP: %s", tmp[0]) +	} + +	ip, err := hex.DecodeString(tmp[0]) +	if err != nil { +		return nil, 0, err +	} + +	port, err := strconv.ParseUint(tmp[1], 16, 16) +	if err != nil { +		return nil, 0, err +	} + +	return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 0000000..d7a248c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,138 @@ +package procfs + +import ( +	"fmt" +	"io/ioutil" +	"regexp" +	"strconv" +	"strings" +) + +var ( +	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) +	buildlineRE  = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { +	// Name of the device. +	Name string +	// activity-state of the device. +	ActivityState string +	// Number of active disks. +	DisksActive int64 +	// Total number of disks the device consists of. +	DisksTotal int64 +	// Number of blocks the device holds. +	BlocksTotal int64 +	// Number of blocks on the device that are in sync. +	BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { +	mdStatusFilePath := fs.Path("mdstat") +	content, err := ioutil.ReadFile(mdStatusFilePath) +	if err != nil { +		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) +	} + +	mdStates := []MDStat{} +	lines := strings.Split(string(content), "\n") +	for i, l := range lines { +		if l == "" { +			continue +		} +		if l[0] == ' ' { +			continue +		} +		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { +			continue +		} + +		mainLine := strings.Split(l, " ") +		if len(mainLine) < 3 { +			return mdStates, fmt.Errorf("error parsing mdline: %s", l) +		} +		mdName := mainLine[0] +		activityState := mainLine[2] + +		if len(lines) <= i+3 { +			return mdStates, fmt.Errorf( +				"error parsing %s: too few lines for md device %s", +				mdStatusFilePath, +				mdName, +			) +		} + +		active, total, size, err := evalStatusline(lines[i+1]) +		if err != nil { +			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) +		} + +		// j is the line number of the syncing-line. +		j := i + 2 +		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line +			j = i + 3 +		} + +		// If device is syncing at the moment, get the number of currently +		// synced bytes, otherwise that number equals the size of the device. 
+		syncedBlocks := size +		if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { +			syncedBlocks, err = evalBuildline(lines[j]) +			if err != nil { +				return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) +			} +		} + +		mdStates = append(mdStates, MDStat{ +			Name:          mdName, +			ActivityState: activityState, +			DisksActive:   active, +			DisksTotal:    total, +			BlocksTotal:   size, +			BlocksSynced:  syncedBlocks, +		}) +	} + +	return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { +	matches := statuslineRE.FindStringSubmatch(statusline) +	if len(matches) != 4 { +		return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) +	} + +	size, err = strconv.ParseInt(matches[1], 10, 64) +	if err != nil { +		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) +	} + +	total, err = strconv.ParseInt(matches[2], 10, 64) +	if err != nil { +		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) +	} + +	active, err = strconv.ParseInt(matches[3], 10, 64) +	if err != nil { +		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) +	} + +	return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { +	matches := buildlineRE.FindStringSubmatch(buildline) +	if len(matches) != 2 { +		return 0, fmt.Errorf("unexpected buildline: %s", buildline) +	} + +	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) +	if err != nil { +		return 0, fmt.Errorf("%s in buildline: %s", err, buildline) +	} + +	return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 0000000..47ab0a7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,552 @@ +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog 
was used +// heavily as a reference: +//   https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( +	"bufio" +	"fmt" +	"io" +	"strconv" +	"strings" +	"time" +) + +// Constants shared between multiple functions. +const ( +	deviceEntryLen = 8 + +	fieldBytesLen  = 8 +	fieldEventsLen = 27 + +	statVersion10 = "1.0" +	statVersion11 = "1.1" + +	fieldTransport10Len = 10 +	fieldTransport11Len = 13 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { +	// Name of the device. +	Device string +	// The mount point of the device. +	Mount string +	// The filesystem type used by the device. +	Type string +	// If available additional statistics related to this Mount. +	// Use a type assertion to determine if additional statistics are available. +	Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { +	mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { +	// The version of statistics provided. +	StatVersion string +	// The age of the NFS mount. +	Age time.Duration +	// Statistics related to byte counters for various operations. +	Bytes NFSBytesStats +	// Statistics related to various NFS event occurrences. +	Events NFSEventsStats +	// Statistics broken down by filesystem operation. +	Operations []NFSOperationStats +	// Statistics about the NFS RPC transport. +	Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { +	// Number of bytes read using the read() syscall. 
+	Read uint64 +	// Number of bytes written using the write() syscall. +	Write uint64 +	// Number of bytes read using the read() syscall in O_DIRECT mode. +	DirectRead uint64 +	// Number of bytes written using the write() syscall in O_DIRECT mode. +	DirectWrite uint64 +	// Number of bytes read from the NFS server, in total. +	ReadTotal uint64 +	// Number of bytes written to the NFS server, in total. +	WriteTotal uint64 +	// Number of pages read directly via mmap()'d files. +	ReadPages uint64 +	// Number of pages written directly via mmap()'d files. +	WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { +	// Number of times cached inode attributes are re-validated from the server. +	InodeRevalidate uint64 +	// Number of times cached dentry nodes are re-validated from the server. +	DnodeRevalidate uint64 +	// Number of times an inode cache is cleared. +	DataInvalidate uint64 +	// Number of times cached inode attributes are invalidated. +	AttributeInvalidate uint64 +	// Number of times files or directories have been open()'d. +	VFSOpen uint64 +	// Number of times a directory lookup has occurred. +	VFSLookup uint64 +	// Number of times permissions have been checked. +	VFSAccess uint64 +	// Number of updates (and potential writes) to pages. +	VFSUpdatePage uint64 +	// Number of pages read directly via mmap()'d files. +	VFSReadPage uint64 +	// Number of times a group of pages have been read. +	VFSReadPages uint64 +	// Number of pages written directly via mmap()'d files. +	VFSWritePage uint64 +	// Number of times a group of pages have been written. +	VFSWritePages uint64 +	// Number of times directory entries have been read with getdents(). +	VFSGetdents uint64 +	// Number of times attributes have been set on inodes. +	VFSSetattr uint64 +	// Number of pending writes that have been forcefully flushed to the server. +	VFSFlush uint64 +	// Number of times fsync() has been called on directories and files. 
+	VFSFsync uint64 +	// Number of times locking has been attemped on a file. +	VFSLock uint64 +	// Number of times files have been closed and released. +	VFSFileRelease uint64 +	// Unknown.  Possibly unused. +	CongestionWait uint64 +	// Number of times files have been truncated. +	Truncation uint64 +	// Number of times a file has been grown due to writes beyond its existing end. +	WriteExtension uint64 +	// Number of times a file was removed while still open by another process. +	SillyRename uint64 +	// Number of times the NFS server gave less data than expected while reading. +	ShortRead uint64 +	// Number of times the NFS server wrote less data than expected while writing. +	ShortWrite uint64 +	// Number of times the NFS server indicated EJUKEBOX; retrieving data from +	// offline storage. +	JukeboxDelay uint64 +	// Number of NFS v4.1+ pNFS reads. +	PNFSRead uint64 +	// Number of NFS v4.1+ pNFS writes. +	PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { +	// The name of the operation. +	Operation string +	// Number of requests performed for this operation. +	Requests uint64 +	// Number of times an actual RPC request has been transmitted for this operation. +	Transmissions uint64 +	// Number of times a request has had a major timeout. +	MajorTimeouts uint64 +	// Number of bytes sent for this operation, including RPC headers and payload. +	BytesSent uint64 +	// Number of bytes received for this operation, including RPC headers and payload. +	BytesReceived uint64 +	// Duration all requests spent queued for transmission before they were sent. +	CumulativeQueueTime time.Duration +	// Duration it took to get a reply back after the request was transmitted. +	CumulativeTotalResponseTime time.Duration +	// Duration from when a request was enqueued to when it was completely handled. 
+	CumulativeTotalRequestTime time.Duration +} + +// A NFSTransportStats contains statistics for the NFS mount RPC requests and +// responses. +type NFSTransportStats struct { +	// The local port used for the NFS mount. +	Port uint64 +	// Number of times the client has had to establish a connection from scratch +	// to the NFS server. +	Bind uint64 +	// Number of times the client has made a TCP connection to the NFS server. +	Connect uint64 +	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has +	// spent waiting for connections to the server to be established. +	ConnectIdleTime uint64 +	// Duration since the NFS mount last saw any RPC traffic. +	IdleTime time.Duration +	// Number of RPC requests for this mount sent to the NFS server. +	Sends uint64 +	// Number of RPC responses for this mount received from the NFS server. +	Receives uint64 +	// Number of times the NFS server sent a response with a transaction ID +	// unknown to this client. +	BadTransactionIDs uint64 +	// A running counter, incremented on each request as the current difference +	// ebetween sends and receives. +	CumulativeActiveRequests uint64 +	// A running counter, incremented on each request by the current backlog +	// queue size. +	CumulativeBacklog uint64 + +	// Stats below only available with stat version 1.1. + +	// Maximum number of simultaneously active RPC requests ever used. +	MaximumRPCSlotsUsed uint64 +	// A running counter, incremented on each request as the current size of the +	// sending queue. +	CumulativeSendingQueue uint64 +	// A running counter, incremented on each request as the current size of the +	// pending queue. +	CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. 
+func parseMountStats(r io.Reader) ([]*Mount, error) { +	const ( +		device            = "device" +		statVersionPrefix = "statvers=" + +		nfs3Type = "nfs" +		nfs4Type = "nfs4" +	) + +	var mounts []*Mount + +	s := bufio.NewScanner(r) +	for s.Scan() { +		// Only look for device entries in this function +		ss := strings.Fields(string(s.Bytes())) +		if len(ss) == 0 || ss[0] != device { +			continue +		} + +		m, err := parseMount(ss) +		if err != nil { +			return nil, err +		} + +		// Does this mount also possess statistics information? +		if len(ss) > deviceEntryLen { +			// Only NFSv3 and v4 are supported for parsing statistics +			if m.Type != nfs3Type && m.Type != nfs4Type { +				return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) +			} + +			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + +			stats, err := parseMountStatsNFS(s, statVersion) +			if err != nil { +				return nil, err +			} + +			m.Stats = stats +		} + +		mounts = append(mounts, m) +	} + +	return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +//   device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { +	if len(ss) < deviceEntryLen { +		return nil, fmt.Errorf("invalid device entry: %v", ss) +	} + +	// Check for specific words appearing at specific indices to ensure +	// the format is consistent with what we expect +	format := []struct { +		i int +		s string +	}{ +		{i: 0, s: "device"}, +		{i: 2, s: "mounted"}, +		{i: 3, s: "on"}, +		{i: 5, s: "with"}, +		{i: 6, s: "fstype"}, +	} + +	for _, f := range format { +		if ss[f.i] != f.s { +			return nil, fmt.Errorf("invalid device entry: %v", ss) +		} +	} + +	return &Mount{ +		Device: ss[1], +		Mount:  ss[4], +		Type:   ss[7], +	}, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
	// Field indicators for parsing specific types of data
	const (
		fieldAge        = "age:"
		fieldBytes      = "bytes:"
		fieldEvents     = "events:"
		fieldPerOpStats = "per-op"
		fieldTransport  = "xprt:"
	)

	stats := &MountStatsNFS{
		StatVersion: statVersion,
	}

	for s.Scan() {
		ss := strings.Fields(string(s.Bytes()))
		if len(ss) == 0 {
			// A blank line terminates this mount's statistics block.
			break
		}
		if len(ss) < 2 {
			return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
		}

		switch ss[0] {
		case fieldAge:
			// Age integer is in seconds
			d, err := time.ParseDuration(ss[1] + "s")
			if err != nil {
				return nil, err
			}

			stats.Age = d
		case fieldBytes:
			bstats, err := parseNFSBytesStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Bytes = *bstats
		case fieldEvents:
			estats, err := parseNFSEventsStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Events = *estats
		case fieldTransport:
			if len(ss) < 3 {
				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
			}

			// ss[1] is the protocol name; the numeric fields start at ss[2].
			tstats, err := parseNFSTransportStats(ss[2:], statVersion)
			if err != nil {
				return nil, err
			}

			stats.Transport = *tstats
		}

		// When encountering "per-operation statistics", we must break this
		// loop and parse them separately to ensure we can terminate parsing
		// before reaching another device entry; hence why this 'if' statement
		// is not just another switch case
		if ss[0] == fieldPerOpStats {
			break
		}
	}

	if err := s.Err(); err != nil {
		return nil, err
	}

	// NFS per-operation stats appear last before the next device entry
	perOpStats, err := parseNFSOperationStats(s)
	if err != nil {
		return nil, err
	}

	stats.Operations = perOpStats

	return stats, nil
}

// parseNFSBytesStats parses a NFSBytesStats line using an input set of
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
	if len(ss) != fieldBytesLen {
		return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
	}

	ns := make([]uint64, 0, fieldBytesLen)
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns = append(ns, n)
	}

	return &NFSBytesStats{
		Read:        ns[0],
		Write:       ns[1],
		DirectRead:  ns[2],
		DirectWrite: ns[3],
		ReadTotal:   ns[4],
		WriteTotal:  ns[5],
		ReadPages:   ns[6],
		WritePages:  ns[7],
	}, nil
}

// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
	if len(ss) != fieldEventsLen {
		return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
	}

	ns := make([]uint64, 0, fieldEventsLen)
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns = append(ns, n)
	}

	return &NFSEventsStats{
		InodeRevalidate:     ns[0],
		DnodeRevalidate:     ns[1],
		DataInvalidate:      ns[2],
		AttributeInvalidate: ns[3],
		VFSOpen:             ns[4],
		VFSLookup:           ns[5],
		VFSAccess:           ns[6],
		VFSUpdatePage:       ns[7],
		VFSReadPage:         ns[8],
		VFSReadPages:        ns[9],
		VFSWritePage:        ns[10],
		VFSWritePages:       ns[11],
		VFSGetdents:         ns[12],
		VFSSetattr:          ns[13],
		VFSFlush:            ns[14],
		VFSFsync:            ns[15],
		VFSLock:             ns[16],
		VFSFileRelease:      ns[17],
		CongestionWait:      ns[18],
		Truncation:          ns[19],
		WriteExtension:      ns[20],
		SillyRename:         ns[21],
		ShortRead:           ns[22],
		ShortWrite:          ns[23],
		JukeboxDelay:        ns[24],
		PNFSRead:            ns[25],
		PNFSWrite:           ns[26],
	}, nil
}

// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
// additional information about per-operation statistics until an empty
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
	const (
		// Number of expected fields in each per-operation statistics set
		numFields = 9
	)

	var ops []NFSOperationStats

	for s.Scan() {
		ss := strings.Fields(string(s.Bytes()))
		if len(ss) == 0 {
			// Must break when reading a blank line after per-operation stats to
			// enable top-level function to parse the next device entry
			break
		}

		if len(ss) != numFields {
			return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
		}

		// Skip string operation name for integers
		ns := make([]uint64, 0, numFields-1)
		for _, st := range ss[1:] {
			n, err := strconv.ParseUint(st, 10, 64)
			if err != nil {
				return nil, err
			}

			ns = append(ns, n)
		}

		ops = append(ops, NFSOperationStats{
			Operation:                   strings.TrimSuffix(ss[0], ":"),
			Requests:                    ns[0],
			Transmissions:               ns[1],
			MajorTimeouts:               ns[2],
			BytesSent:                   ns[3],
			BytesReceived:               ns[4],
			CumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,
			CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
			CumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,
		})
	}

	return ops, s.Err()
}

// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { +	switch statVersion { +	case statVersion10: +		if len(ss) != fieldTransport10Len { +			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) +		} +	case statVersion11: +		if len(ss) != fieldTransport11Len { +			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) +		} +	default: +		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) +	} + +	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay +	// in a v1.0 response +	ns := make([]uint64, 0, fieldTransport11Len) +	for _, s := range ss { +		n, err := strconv.ParseUint(s, 10, 64) +		if err != nil { +			return nil, err +		} + +		ns = append(ns, n) +	} + +	return &NFSTransportStats{ +		Port:                     ns[0], +		Bind:                     ns[1], +		Connect:                  ns[2], +		ConnectIdleTime:          ns[3], +		IdleTime:                 time.Duration(ns[4]) * time.Second, +		Sends:                    ns[5], +		Receives:                 ns[6], +		BadTransactionIDs:        ns[7], +		CumulativeActiveRequests: ns[8], +		CumulativeBacklog:        ns[9], +		MaximumRPCSlotsUsed:      ns[10], +		CumulativeSendingQueue:   ns[11], +		CumulativePendingQueue:   ns[12], +	}, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 0000000..8717e1f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,224 @@ +package procfs + +import ( +	"fmt" +	"io/ioutil" +	"os" +	"strconv" +	"strings" +) + +// Proc provides information about a running process. +type Proc struct { +	// The process ID. +	PID int + +	fs FS +} + +// Procs represents a list of Proc structs. 
type Procs []Proc

// sort.Interface implementation ordering processes by ascending PID.
func (p Procs) Len() int           { return len(p) }
func (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }

// Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return Proc{}, err
	}
	return fs.Self()
}

// NewProc returns a process for the given pid under /proc.
func NewProc(pid int) (Proc, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return Proc{}, err
	}
	return fs.NewProc(pid)
}

// AllProcs returns a list of all currently available processes under /proc.
func AllProcs() (Procs, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return Procs{}, err
	}
	return fs.AllProcs()
}

// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
	p, err := os.Readlink(fs.Path("self"))
	if err != nil {
		return Proc{}, err
	}
	// The symlink resolves to e.g. "/proc/1234"; strip the mount point to
	// recover the numeric PID.  NOTE(review): assumes FS's underlying string
	// is the mount-point prefix of the resolved link — confirm at FS's
	// declaration.
	pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
	if err != nil {
		return Proc{}, err
	}
	return fs.NewProc(pid)
}

// NewProc returns a process for the given pid.
func (fs FS) NewProc(pid int) (Proc, error) {
	// Stat the directory up front so callers get an error for vanished or
	// never-existing PIDs rather than failures on later reads.
	if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
		return Proc{}, err
	}
	return Proc{PID: pid, fs: fs}, nil
}

// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
	d, err := os.Open(fs.Path())
	if err != nil {
		return Procs{}, err
	}
	defer d.Close()

	names, err := d.Readdirnames(-1)
	if err != nil {
		return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
	}

	p := Procs{}
	for _, n := range names {
		// Non-numeric entries (e.g. "self", "stat") are not processes; skip
		// them silently.
		pid, err := strconv.ParseInt(n, 10, 64)
		if err != nil {
			continue
		}
		p = append(p, Proc{PID: int(pid), fs: fs})
	}

	return p, nil
}

// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) { +	f, err := os.Open(p.path("cmdline")) +	if err != nil { +		return nil, err +	} +	defer f.Close() + +	data, err := ioutil.ReadAll(f) +	if err != nil { +		return nil, err +	} + +	if len(data) < 1 { +		return []string{}, nil +	} + +	return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { +	f, err := os.Open(p.path("comm")) +	if err != nil { +		return "", err +	} +	defer f.Close() + +	data, err := ioutil.ReadAll(f) +	if err != nil { +		return "", err +	} + +	return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { +	exe, err := os.Readlink(p.path("exe")) +	if os.IsNotExist(err) { +		return "", nil +	} + +	return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { +	names, err := p.fileDescriptors() +	if err != nil { +		return nil, err +	} + +	fds := make([]uintptr, len(names)) +	for i, n := range names { +		fd, err := strconv.ParseInt(n, 10, 32) +		if err != nil { +			return nil, fmt.Errorf("could not parse fd %s: %s", n, err) +		} +		fds[i] = uintptr(fd) +	} + +	return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { +	names, err := p.fileDescriptors() +	if err != nil { +		return nil, err +	} + +	targets := make([]string, len(names)) + +	for i, name := range names { +		target, err := os.Readlink(p.path("fd", name)) +		if err == nil { +			targets[i] = target +		} +	} + +	return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. 
func (p Proc) FileDescriptorsLen() (int, error) {
	fds, err := p.fileDescriptors()
	if err != nil {
		return 0, err
	}

	return len(fds), nil
}

// MountStats retrieves statistics and configuration for mount points in a
// process's namespace.
func (p Proc) MountStats() ([]*Mount, error) {
	f, err := os.Open(p.path("mountstats"))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseMountStats(f)
}

// fileDescriptors returns the directory entry names of /proc/[pid]/fd, one
// per open file descriptor.
func (p Proc) fileDescriptors() ([]string, error) {
	d, err := os.Open(p.path("fd"))
	if err != nil {
		return nil, err
	}
	defer d.Close()

	names, err := d.Readdirnames(-1)
	if err != nil {
		return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
	}

	return names, nil
}

// path joins the given elements under this process's /proc/[pid] directory.
func (p Proc) path(pa ...string) string {
	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000..b4e31d7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
package procfs

import (
	"fmt"
	"io/ioutil"
	"os"
)

// ProcIO models the content of /proc/<pid>/io.
type ProcIO struct {
	// Chars read.
	RChar uint64
	// Chars written.
	WChar uint64
	// Read syscalls.
	SyscR uint64
	// Write syscalls.
	SyscW uint64
	// Bytes read.
	ReadBytes uint64
	// Bytes written.
	WriteBytes uint64
	// Bytes written, but taking into account truncation. See
	// Documentation/filesystems/proc.txt in the kernel sources for
	// detailed explanation.
	CancelledWriteBytes int64
}

// NewIO creates a new ProcIO instance from a given Proc instance.
func (p Proc) NewIO() (ProcIO, error) {
	pio := ProcIO{}

	f, err := os.Open(p.path("io"))
	if err != nil {
		return pio, err
	}
	defer f.Close()

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return pio, err
	}

	// The io file has a fixed "key: value" line order; a single Sscanf with a
	// matching format string extracts every counter at once.
	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
		"read_bytes: %d\nwrite_bytes: %d\n" +
		"cancelled_write_bytes: %d\n"

	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
	if err != nil {
		return pio, err
	}

	return pio, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..2df997c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
package procfs

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strconv"
)

// ProcLimits represents the soft limits for each of the process's resource
// limits. For more information see getrlimit(2):
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
	// CPU time limit in seconds.
	CPUTime int
	// Maximum size of files that the process may create.
	FileSize int
	// Maximum size of the process's data segment (initialized data,
	// uninitialized data, and heap).
	DataSize int
	// Maximum size of the process stack in bytes.
	StackSize int
	// Maximum size of a core file.
	CoreFileSize int
	// Limit of the process's resident set in pages.
	ResidentSet int
	// Maximum number of processes that can be created for the real user ID of
	// the calling process.
	Processes int
	// Value one greater than the maximum file descriptor number that can be
	// opened by this process.
	OpenFiles int
	// Maximum number of bytes of memory that may be locked into RAM.
	LockedMemory int
	// Maximum size of the process's virtual memory address space in bytes.
	AddressSpace int
	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
	// this process may establish.
	FileLocks int
	// Limit of signals that may be queued for the real user ID of the calling
	// process.
	PendingSignals int
	// Limit on the number of bytes that can be allocated for POSIX message
	// queues for the real user ID of the calling process.
	MsqqueueSize int
	// Limit of the nice priority set using setpriority(2) or nice(2).
	NicePriority int
	// Limit of the real-time priority set using sched_setscheduler(2) or
	// sched_setparam(2).
	RealtimePriority int
	// Limit (in microseconds) on the amount of CPU time that a process
	// scheduled under a real-time scheduling policy may consume without making
	// a blocking system call.
	RealtimeTimeout int
}

const (
	// Number of columns split out of each limits line: name, soft limit, rest.
	limitsFields    = 3
	limitsUnlimited = "unlimited"
)

var (
	// Columns in /proc/[pid]/limits are separated by runs of two or more
	// spaces; single spaces occur inside the limit names themselves.
	limitsDelimiter = regexp.MustCompile("  +")
)

// NewLimits returns the current soft limits of the process.
func (p Proc) NewLimits() (ProcLimits, error) {
	f, err := os.Open(p.path("limits"))
	if err != nil {
		return ProcLimits{}, err
	}
	defer f.Close()

	var (
		l = ProcLimits{}
		s = bufio.NewScanner(f)
	)
	for s.Scan() {
		// fields[0] is the limit name, fields[1] the soft limit; the hard
		// limit and units in fields[2] are ignored.
		fields := limitsDelimiter.Split(s.Text(), limitsFields)
		if len(fields) != limitsFields {
			return ProcLimits{}, fmt.Errorf(
				"couldn't parse %s line %s", f.Name(), s.Text())
		}

		switch fields[0] {
		case "Max cpu time":
			l.CPUTime, err = parseInt(fields[1])
		case "Max file size":
			l.FileSize, err = parseInt(fields[1])
		case "Max data size":
			l.DataSize, err = parseInt(fields[1])
		case "Max stack size":
			l.StackSize, err = parseInt(fields[1])
		case "Max core file size":
			l.CoreFileSize, err = parseInt(fields[1])
		case "Max resident set":
			l.ResidentSet, err = parseInt(fields[1])
		case "Max processes":
			l.Processes, err = parseInt(fields[1])
		case "Max open files":
			l.OpenFiles, err = parseInt(fields[1])
		case "Max locked memory":
			l.LockedMemory, err = parseInt(fields[1])
		case "Max address space":
			l.AddressSpace, err = parseInt(fields[1])
		case "Max file locks":
			l.FileLocks, err = parseInt(fields[1])
		case "Max pending signals":
			l.PendingSignals, err = parseInt(fields[1])
		case "Max msgqueue size":
			l.MsqqueueSize, err = parseInt(fields[1])
		case "Max nice priority":
			l.NicePriority, err = parseInt(fields[1])
		case "Max realtime priority":
			l.RealtimePriority, err = parseInt(fields[1])
		case "Max realtime timeout":
			l.RealtimeTimeout, err = parseInt(fields[1])
		}
		if err != nil {
			return ProcLimits{}, err
		}
	}

	return l, s.Err()
}

// parseInt converts a limits value to int, mapping "unlimited" to -1.
// NOTE(review): the 32-bit bitSize means byte-valued limits above 2^31-1
// (e.g. large address-space limits) fail to parse; widening would require
// changing the public ProcLimits field types — flagging rather than fixing.
func parseInt(s string) (int, error) {
	if s == limitsUnlimited {
		return -1, nil
	}
	i, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
	}
	return int(i), nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..724e271
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
package procfs

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
)

// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
// which required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic.  After
// much research it was determined that USER_HZ is actually hardcoded to 100 on
// all Go-supported platforms as of the time of this writing. This is why we
// decided to hardcode it here as well. It is not impossible that there could
// be systems with exceptions, but they should be very exotic edge cases, and
// in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
// - https://github.com/prometheus/node_exporter/issues/52
// - https://github.com/prometheus/procfs/pull/2
// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
const userHZ = 100

// ProcStat provides status information about the process,
// read from /proc/[pid]/stat.
type ProcStat struct {
	// The process ID.
	PID int
	// The filename of the executable.
	Comm string
	// The process state.
	State string
	// The PID of the parent of this process.
	PPID int
	// The process group ID of the process.
	PGRP int
	// The session ID of the process.
	Session int
	// The controlling terminal of the process.
	TTY int
	// The ID of the foreground process group of the controlling terminal of
	// the process.
	TPGID int
	// The kernel flags word of the process.
	Flags uint
	// The number of minor faults the process has made which have not required
	// loading a memory page from disk.
	MinFlt uint
	// The number of minor faults that the process's waited-for children have
	// made.
	CMinFlt uint
	// The number of major faults the process has made which have required
	// loading a memory page from disk.
	MajFlt uint
	// The number of major faults that the process's waited-for children have
	// made.
	CMajFlt uint
	// Amount of time that this process has been scheduled in user mode,
	// measured in clock ticks.
	UTime uint
	// Amount of time that this process has been scheduled in kernel mode,
	// measured in clock ticks.
	STime uint
	// Amount of time that this process's waited-for children have been
	// scheduled in user mode, measured in clock ticks.
	CUTime uint
	// Amount of time that this process's waited-for children have been
	// scheduled in kernel mode, measured in clock ticks.
	CSTime uint
	// For processes running a real-time scheduling policy, this is the negated
	// scheduling priority, minus one.
	Priority int
	// The nice value, a value in the range 19 (low priority) to -20 (high
	// priority).
	Nice int
	// Number of threads in this process.
	NumThreads int
	// The time the process started after system boot, the value is expressed
	// in clock ticks.
	Starttime uint64
	// Virtual memory size in bytes.
	VSize int
	// Resident set size in pages.
	RSS int

	fs FS
}

// NewStat returns the current status information of the process.
func (p Proc) NewStat() (ProcStat, error) {
	f, err := os.Open(p.path("stat"))
	if err != nil {
		return ProcStat{}, err
	}
	defer f.Close()

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return ProcStat{}, err
	}

	var (
		ignore int

		s = ProcStat{PID: p.PID, fs: p.fs}
		// The comm field is parenthesized and may itself contain spaces or
		// parentheses, so locate it by the first "(" and the last ")" rather
		// than by whitespace splitting.
		l = bytes.Index(data, []byte("("))
		r = bytes.LastIndex(data, []byte(")"))
	)

	if l < 0 || r < 0 {
		return ProcStat{}, fmt.Errorf(
			"unexpected format, couldn't extract comm: %s",
			data,
		)
	}

	s.Comm = string(data[l+1 : r])
	// Scan the remaining whitespace-separated fields after ") ".
	_, err = fmt.Fscan(
		bytes.NewBuffer(data[r+2:]),
		&s.State,
		&s.PPID,
		&s.PGRP,
		&s.Session,
		&s.TTY,
		&s.TPGID,
		&s.Flags,
		&s.MinFlt,
		&s.CMinFlt,
		&s.MajFlt,
		&s.CMajFlt,
		&s.UTime,
		&s.STime,
		&s.CUTime,
		&s.CSTime,
		&s.Priority,
		&s.Nice,
		&s.NumThreads,
		&ignore,
		&s.Starttime,
		&s.VSize,
		&s.RSS,
	)
	if err != nil {
		return ProcStat{}, err
	}

	return s, nil
}

// VirtualMemory returns the virtual memory size in bytes.
func (s ProcStat) VirtualMemory() int {
	return s.VSize
}

// ResidentMemory returns the resident memory size in bytes.
func (s ProcStat) ResidentMemory() int {
	return s.RSS * os.Getpagesize()
}

// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
	stat, err := s.fs.NewStat()
	if err != nil {
		return 0, err
	}
	// Boot time plus the process start offset converted from clock ticks.
	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
}

// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 { +	return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 0000000..1ca217e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,56 @@ +package procfs + +import ( +	"bufio" +	"fmt" +	"os" +	"strconv" +	"strings" +) + +// Stat represents kernel/system statistics. +type Stat struct { +	// Boot time in seconds since the Epoch. +	BootTime int64 +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { +	fs, err := NewFS(DefaultMountPoint) +	if err != nil { +		return Stat{}, err +	} + +	return fs.NewStat() +} + +// NewStat returns an information about current kernel/system statistics. +func (fs FS) NewStat() (Stat, error) { +	f, err := os.Open(fs.Path("stat")) +	if err != nil { +		return Stat{}, err +	} +	defer f.Close() + +	s := bufio.NewScanner(f) +	for s.Scan() { +		line := s.Text() +		if !strings.HasPrefix(line, "btime") { +			continue +		} +		fields := strings.Fields(line) +		if len(fields) != 2 { +			return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) +		} +		i, err := strconv.ParseInt(fields[1], 10, 32) +		if err != nil { +			return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) +		} +		return Stat{BootTime: i}, nil +	} +	if err := s.Err(); err != nil { +		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) +	} + +	return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) +}  | 
