// Copyright (C) 2019-2024 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.

// Package metrics provides metric logging wrappers for a Prometheus server.
package metrics

import (
	"context"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"time"
	// logging imports metrics so that we can have metrics about logging; that is more important
	// than the four Debug lines we used to have here logging about metrics.
	// TODO: find a cleverer way to resolve the import cycle.
	//"github.com/algorand/go-algorand/logging"
)

const (
	// nodeExporterMetricsPath is the telemetry path on which we post metrics to the node exporter.
	nodeExporterMetricsPath = "/metrics"
	// nodeExporterSyncAddr is the TCP address used as a lock to decide which algod process invokes the node exporter.
	nodeExporterSyncAddr = ":38086"
	// nodeExporterRedirectOutput, when true, redirects the node exporter's standard streams to this process's.
	nodeExporterRedirectOutput = false
)

// MetricReporter represents a single running metric server instance
type MetricReporter struct {
	serviceConfig     ServiceConfig
	nextAttempt       time.Time
	gatherInterval    time.Duration
	lastMetricsBuffer strings.Builder
	formattedLabels   string
	neSync            net.Listener // we will use this "locked" network port listener to synchronize which of the algod processes invokes the node_exporter
	neProcess         *os.Process  // a pointer to the node exporter process.
}

// MakeMetricReporter creates a new metric reporter configured by the given ServiceConfig.
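//
// A minimal usage sketch (assuming a ServiceConfig populated elsewhere):
//
//	reporter := MakeMetricReporter(serviceConfig)
//	go reporter.ReporterLoop(ctx)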
func MakeMetricReporter(serviceConfig ServiceConfig) *MetricReporter {
	reporter := &MetricReporter{
		serviceConfig:  serviceConfig,
		gatherInterval: time.Duration(0),
	}
	reporter.createFormattedLabels()
	return reporter
}

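// createFormattedLabels renders the configured service labels as a Prometheus-style
// label list, e.g. `host="relay-1",region="eu"` (hypothetical label names), and caches
// the result in formattedLabels. Each pair is written with a leading comma, and the
// first comma is trimmed from the final string.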
func (reporter *MetricReporter) createFormattedLabels() {
	var buf strings.Builder
	if len(reporter.serviceConfig.Labels) == 0 {
		return
	}
	for k, v := range reporter.serviceConfig.Labels {
		buf.WriteString("," + k + "=\"" + v + "\"")
	}

	reporter.formattedLabels = buf.String()[1:]
}

// ReporterLoop is the main reporter loop. It waits until it receives feedback from the node exporter regarding the desired post interval,
// then posts the collected metrics at that interval. Note that the context is used to abort this loop if needed.
func (reporter *MetricReporter) ReporterLoop(ctx context.Context) {
	defer reporter.tryDetachNodeExporter()
	reporter.nextAttempt = time.Now()
	for {
		// wait until nextAttempt arrives, or until the context expires.
		if !reporter.waitForTimeStamp(ctx) {
			// context expired, abort.
			return
		}
		// collect the metrics, but only once we've established a sampling rate.
		if reporter.gatherInterval != time.Duration(0) {
			reporter.gatherMetrics()
		}
		// post the collected metrics and retrieve sampling rate.
		if !reporter.postGatheredMetrics(ctx) {
			// context expired, abort.
			return
		}
		// was gatherInterval updated during postGatheredMetrics?
		// (the server's reply includes a "SampleRate" header, which is used to update the gatherInterval variable)
		if reporter.gatherInterval == time.Duration(0) {
			// wait an arbitrary 2 seconds before trying again.
			reporter.nextAttempt = time.Now().Add(2 * time.Second)
		} else {
			// figure out the next time we're going to update the collected metrics.
			reporter.nextAttempt = time.Now().Add(reporter.gatherInterval)
		}
	}
}

// waitForTimeStamp blocks until the timestamp in nextAttempt arrives (returning true) or the context expires (returning false).
func (reporter *MetricReporter) waitForTimeStamp(ctx context.Context) bool {
	now := time.Now()
	if now.After(reporter.nextAttempt) {
		// we've already surpassed the time when we need to post again.
		return true
	}
	waitTime := reporter.nextAttempt.Sub(now)
	waitTimer := time.NewTimer(waitTime)
	select {
	case <-ctx.Done():
		waitTimer.Stop()
		return false
	case <-waitTimer.C:
		return true
	}
}

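// gatherMetrics takes a snapshot of the default registry's metrics, rendered with the
// formatted labels, and stores it in lastMetricsBuffer for the next post.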
func (reporter *MetricReporter) gatherMetrics() {
	var buf strings.Builder
	DefaultRegistry().WriteMetrics(&buf, reporter.formattedLabels)
	reporter.lastMetricsBuffer = buf
}

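// postGatheredMetrics POSTs the last gathered metrics to the node exporter's listen address.
// It returns false only when the post failed because the context expired; any other failure
// is absorbed, and may trigger an attempt to invoke the node exporter.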
func (reporter *MetricReporter) postGatheredMetrics(ctx context.Context) bool {
	request, err := http.NewRequest("POST", "http://"+reporter.serviceConfig.NodeExporterListenAddress+nodeExporterMetricsPath, strings.NewReader(reporter.lastMetricsBuffer.String()))
	if err != nil {
		// logging.Base().Debugf("Unable to post metrics to '%s'; error : '%v'", reporter.serviceConfig.NodeExporterListenAddress, err)
		return true
	}
	request = request.WithContext(ctx)
	var client http.Client
	resp, err := client.Do(request)
	if err == nil {
		reporter.parseSampleRate(resp)
		resp.Body.Close() // close the response body so the underlying connection can be reused.
	} else {
		// did we fail due to context expiration?
		if ctx.Err() != nil {
			// we failed because the context expired.
			return false
		}
		// there was an error, but it wasn't due to expired context. We should try to invoke node_exporter, as needed.
		reporter.tryInvokeNodeExporter(ctx)
	}
	return true
}

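// parseSampleRate reads the "SampleRate" header from the node exporter's response,
// interpreting its value as a number of seconds, and updates gatherInterval accordingly.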
func (reporter *MetricReporter) parseSampleRate(resp *http.Response) {
	// do we have the SampleRate header?
	if values, hasValue := resp.Header[http.CanonicalHeaderKey("SampleRate")]; hasValue {
		// we have the sample rate; read it.
		if len(values) == 0 {
			return
		}
		sampleRate, err := time.ParseDuration(values[0] + "s")
		if err != nil {
			return
		}
		reporter.gatherInterval = sampleRate
	}
}

// tryDetachNodeExporter detaches the reporter from the node exporter process, if one was invoked by this algod instance, and releases the synchronization port.
func (reporter *MetricReporter) tryDetachNodeExporter() {
	proc := reporter.neProcess
	if proc != nil {
		proc.Release()
	}
	reporter.neProcess = nil
	// release the neSync lock.
	if reporter.neSync != nil {
		// close the socket so that another process can take ownership.
		reporter.neSync.Close()
		reporter.neSync = nil
	}
}

// parseNodeExporterArgs parses the NodeExporterPath configuration string into the node exporter's argument vector,
// making sure the --web.listen-address and --web.telemetry-path flags are always set; values embedded in the
// configuration string override the defaults passed in.
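//
// For example (hypothetical path), a NodeExporterPath of
// "/usr/bin/node_exporter --web.listen-address=:9100" yields
//
//	["/usr/bin/node_exporter", "--web.listen-address=:9100", "--web.telemetry-path=/metrics"]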
func parseNodeExporterArgs(nodeExporterPath string, nodeExporterListenAddress string, nodeExporterMetricsPath string) []string {
	whitespaceRE := regexp.MustCompile(`\s+`)
	listenAddressRE := regexp.MustCompile(`--web.listen-address=(.+)`)
	telemetryPathRE := regexp.MustCompile(`--web.telemetry-path=(.+)`)
	vargs := whitespaceRE.Split(nodeExporterPath, -1)
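	// filter vargs in place: temp shares vargs' backing array, so appending to temp
	// compacts the retained arguments at the front of vargs without allocating.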
	temp := vargs[:0]
	for _, varg := range vargs {
		if listenAddressRE.MatchString(varg) {
			nodeExporterListenAddress = listenAddressRE.FindStringSubmatch(varg)[1]
		} else if telemetryPathRE.MatchString(varg) {
			nodeExporterMetricsPath = telemetryPathRE.FindStringSubmatch(varg)[1]
		} else if varg == "" {
			continue
		} else {
			temp = append(temp, varg)
		}
	}
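	// temp and vargs share storage; re-slice to the retained prefix and append the
	// (possibly overridden) listen-address and telemetry-path flags.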
	vargs = append(vargs[:len(temp)],
		"--web.listen-address="+nodeExporterListenAddress,
		"--web.telemetry-path="+nodeExporterMetricsPath)
	return vargs
}

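// tryInvokeNodeExporter launches the node exporter if no other algod process already owns it.
// Ownership is decided by binding the nodeExporterSyncAddr TCP port: only the process holding
// that listener starts (and restarts) the node exporter.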
func (reporter *MetricReporter) tryInvokeNodeExporter(ctx context.Context) {
	var err error
	if reporter.neSync == nil {
		// try to create it.
		if reporter.neSync, err = net.Listen("tcp", nodeExporterSyncAddr); err != nil {
			// we couldn't get hold of this port number; that's expected behavior for any algod instance that isn't the first one.
			return
		}
		// good! we were able to obtain ownership of this port.
	} else {
		// we already own this port. we need to check on the current status of node_exporter.
		if reporter.neProcess != nil {
			// process is already running.
			return
		}
	}
	// give the node exporter the same environment variables we've received.
	neAttributes := os.ProcAttr{
		Dir: filepath.Dir(os.Args[0]),
		Env: os.Environ(),
	}
	if nodeExporterRedirectOutput {
		neAttributes.Files = []*os.File{
			os.Stdin,
			os.Stdout,
			os.Stderr}
	}
	// prepare the vargs that the new process is going to have.
	vargs := parseNodeExporterArgs(reporter.serviceConfig.NodeExporterPath, reporter.serviceConfig.NodeExporterListenAddress, nodeExporterMetricsPath)
	// launch the process
	proc, err := os.StartProcess(vargs[0], vargs, &neAttributes)
	if err != nil {
		// logging.Base().Debugf("Unable to start node exporter : %v", err)
		return
	}
	// logging.Base().Debugf("Started node exporter with pid : %d", proc.Pid)

	reporter.neProcess = proc

	// wait for the process to complete on a separate goroutine, and set reporter.neProcess
	// to nil once it's done, so that a later post failure can relaunch the node exporter.
	go func(proc **os.Process) {
		(*proc).Wait()
		// status, _ := (*proc).Wait()
		// logging.Base().Debugf("Node exporter process ended : %v", status)
		(*proc) = nil
	}(&reporter.neProcess)
}