forked from grafana/metrictank
-
Notifications
You must be signed in to change notification settings - Fork 0
/
metrictank.go
433 lines (369 loc) · 15.5 KB
/
metrictank.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
package main
import (
"flag"
"fmt"
l "log"
"os"
"os/signal"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
_ "net/http/pprof"
"github.com/Dieterbe/profiletrigger/heap"
"github.com/Shopify/sarama"
"github.com/benbjohnson/clock"
"github.com/raintank/dur"
"github.com/raintank/metrictank/api"
"github.com/raintank/metrictank/cluster"
"github.com/raintank/metrictank/idx"
"github.com/raintank/metrictank/idx/cassandra"
"github.com/raintank/metrictank/idx/memory"
"github.com/raintank/metrictank/input"
inCarbon "github.com/raintank/metrictank/input/carbon"
inKafkaMdm "github.com/raintank/metrictank/input/kafkamdm"
"github.com/raintank/metrictank/mdata"
"github.com/raintank/metrictank/mdata/cache"
"github.com/raintank/metrictank/mdata/notifierKafka"
"github.com/raintank/metrictank/mdata/notifierNsq"
"github.com/raintank/metrictank/stats"
statsConfig "github.com/raintank/metrictank/stats/config"
"github.com/raintank/metrictank/usage"
"github.com/raintank/worldping-api/pkg/log"
"github.com/rakyll/globalconf"
)
// Package-level state and command-line flags. All flags can also be set via
// the config file or MT_-prefixed environment variables (see globalconf setup
// in main). Flag defaults documented here are the shipped defaults.
var (
// logLevel is registered in init() via flag.IntVar.
logLevel int
// warmupPeriod is parsed from *warmUpPeriodStr in main.
warmupPeriod time.Duration
// startupTime is recorded at the top of main and passed to cluster.Init.
startupTime time.Time
// GitHash is "(none)" unless overridden at build time (typically via -ldflags).
GitHash = "(none)"
// metrics is the in-memory store, created in main after the backend store.
metrics *mdata.AggMetrics
// metricIndex is whichever single index plugin (memory or cassandra) is enabled.
metricIndex idx.MetricIndex
// Misc:
instance = flag.String("instance", "default", "instance identifier. must be unique. used in clustering messages, for naming queue consumers and emitted metrics")
showVersion = flag.Bool("version", false, "print version string")
confFile = flag.String("config", "/etc/metrictank/metrictank.ini", "configuration file path")
accountingPeriodStr = flag.String("accounting-period", "5min", "accounting period to track per-org usage metrics")
// Data:
dropFirstChunk = flag.Bool("drop-first-chunk", false, "forego persisting of first received (and typically incomplete) chunk")
chunkMaxStaleStr = flag.String("chunk-max-stale", "1h", "max age for a chunk before to be considered stale and to be persisted to Cassandra.")
metricMaxStaleStr = flag.String("metric-max-stale", "6h", "max age for a metric before to be considered stale and to be purged from memory.")
gcIntervalStr = flag.String("gc-interval", "1h", "Interval to run garbage collection job.")
warmUpPeriodStr = flag.String("warm-up-period", "1h", "duration before secondary nodes start serving requests")
// Cassandra:
cassandraAddrs = flag.String("cassandra-addrs", "localhost", "cassandra host (may be given multiple times as comma-separated list)")
cassandraKeyspace = flag.String("cassandra-keyspace", "metrictank", "cassandra keyspace to use for storing the metric data table")
cassandraConsistency = flag.String("cassandra-consistency", "one", "write consistency (any|one|two|three|quorum|all|local_quorum|each_quorum|local_one")
cassandraHostSelectionPolicy = flag.String("cassandra-host-selection-policy", "tokenaware,hostpool-epsilon-greedy", "")
cassandraTimeout = flag.Int("cassandra-timeout", 1000, "cassandra timeout in milliseconds")
cassandraReadConcurrency = flag.Int("cassandra-read-concurrency", 20, "max number of concurrent reads to cassandra.")
cassandraWriteConcurrency = flag.Int("cassandra-write-concurrency", 10, "max number of concurrent writes to cassandra.")
cassandraReadQueueSize = flag.Int("cassandra-read-queue-size", 100, "max number of outstanding reads before blocking. value doesn't matter much")
cassandraWriteQueueSize = flag.Int("cassandra-write-queue-size", 100000, "write queue size per cassandra worker. should be large engough to hold all at least the total number of series expected, divided by how many workers you have")
cassandraRetries = flag.Int("cassandra-retries", 0, "how many times to retry a query before failing it")
cassandraWindowFactor = flag.Int("cassandra-window-factor", 20, "size of compaction window relative to TTL")
cqlProtocolVersion = flag.Int("cql-protocol-version", 4, "cql protocol version to use")
cassandraSSL = flag.Bool("cassandra-ssl", false, "enable SSL connection to cassandra")
cassandraCaPath = flag.String("cassandra-ca-path", "/etc/metrictank/ca.pem", "cassandra CA certificate path when using SSL")
cassandraHostVerification = flag.Bool("cassandra-host-verification", true, "host (hostname and server cert) verification when using SSL")
cassandraAuth = flag.Bool("cassandra-auth", false, "enable cassandra authentication")
cassandraUsername = flag.String("cassandra-username", "cassandra", "username for authentication")
cassandraPassword = flag.String("cassandra-password", "cassandra", "password for authentication")
// Profiling, instrumentation and logging:
blockProfileRate = flag.Int("block-profile-rate", 0, "see https://golang.org/pkg/runtime/#SetBlockProfileRate")
memProfileRate = flag.Int("mem-profile-rate", 512*1024, "0 to disable. 1 for max precision (expensive!) see https://golang.org/pkg/runtime/#pkg-variables")
proftrigPath = flag.String("proftrigger-path", "/tmp", "path to store triggered profiles")
proftrigFreqStr = flag.String("proftrigger-freq", "60s", "inspect status frequency. set to 0 to disable")
proftrigMinDiffStr = flag.String("proftrigger-min-diff", "1h", "minimum time between triggered profiles")
proftrigHeapThresh = flag.Int("proftrigger-heap-thresh", 25000000000, "if this many bytes allocated, trigger a profile")
)
// init registers the log-level flag. It binds directly to the package-level
// logLevel int (rather than returning a pointer like the other flags) because
// logLevel is assigned onward to several sub-packages in main.
func init() {
flag.IntVar(&logLevel, "log-level", 2, "log level. 0=TRACE|1=DEBUG|2=INFO|3=WARN|4=ERROR|5=CRITICAL|6=FATAL")
}
// main wires up the whole metrictank process in a strict order:
// config -> logging -> cluster -> backend store -> memory store -> inputs ->
// index -> API -> notifiers -> usage accounting, then blocks until SIGINT or
// SIGTERM and shuts the pieces down in roughly reverse order.
func main() {
	startupTime = time.Now()

	/***********************************
		Initialize Configuration
	***********************************/
	flag.Parse()

	// if the user just wants the version, give it and exit
	if *showVersion {
		fmt.Printf("metrictank (built with %s, git hash %s)\n", runtime.Version(), GitHash)
		return
	}

	// Only try and parse the conf file if it exists
	path := ""
	if _, err := os.Stat(*confFile); err == nil {
		path = *confFile
	}
	conf, err := globalconf.NewWithOptions(&globalconf.Options{
		Filename:  path,
		EnvPrefix: "MT_",
	})
	if err != nil {
		// log.Fatal terminates the process (every other call site in this
		// file relies on that); the os.Exit(1) that used to follow here was
		// unreachable and has been removed.
		log.Fatal(4, "error with configuration file: %s", err)
	}

	// register the config sections of each subsystem before parsing,
	// so globalconf can populate all of them in one pass.
	// load config for metric ingestors
	inCarbon.ConfigSetup()
	inKafkaMdm.ConfigSetup()
	// load config for cluster handlers
	notifierNsq.ConfigSetup()
	// load config for metricIndexers
	memory.ConfigSetup()
	cassandra.ConfigSetup()
	// load config for API
	api.ConfigSetup()
	// load config for cluster
	cluster.ConfigSetup()
	// stats
	statsConfig.ConfigSetup()
	// storage-schemas, storage-aggregation files
	mdata.ConfigSetup()

	conf.ParseAll()

	/***********************************
		Initialize Logging
	***********************************/
	log.NewLogger(0, "console", fmt.Sprintf(`{"level": %d, "formatting":false}`, logLevel))
	// propagate the chosen level into sub-packages that keep their own copy.
	mdata.LogLevel = logLevel
	inKafkaMdm.LogLevel = logLevel
	api.LogLevel = logLevel
	// workaround for https://github.com/grafana/grafana/issues/4055
	switch logLevel {
	case 0:
		log.Level(log.TRACE)
	case 1:
		log.Level(log.DEBUG)
	case 2:
		log.Level(log.INFO)
	case 3:
		log.Level(log.WARN)
	case 4:
		log.Level(log.ERROR)
	case 5:
		log.Level(log.CRITICAL)
	case 6:
		log.Level(log.FATAL)
	}

	/***********************************
		Validate settings needed for clustering
	***********************************/
	if *instance == "" {
		log.Fatal(4, "instance can't be empty")
	}

	log.Info("Metrictank starting. Built from %s - Go version %s", GitHash, runtime.Version())

	/***********************************
		Initialize our Cluster
	***********************************/
	api.ConfigProcess()
	cluster.ConfigProcess()
	scheme := "http"
	if api.UseSSL {
		scheme = "https"
	}
	// the cluster advertises the API port, taken from the last ":"-separated
	// component of the listen address.
	addrParts := strings.Split(api.Addr, ":")
	port, err := strconv.ParseInt(addrParts[len(addrParts)-1], 10, 64)
	if err != nil {
		log.Fatal(4, "Could not parse port from listenAddr. %s", api.Addr)
	}
	cluster.Init(*instance, GitHash, startupTime, scheme, int(port))

	/***********************************
		Validate remaining settings
	***********************************/
	inCarbon.ConfigProcess()
	inKafkaMdm.ConfigProcess(*instance)
	notifierNsq.ConfigProcess()
	notifierKafka.ConfigProcess(*instance)
	statsConfig.ConfigProcess(*instance)
	mdata.ConfigProcess()

	if !inCarbon.Enabled && !inKafkaMdm.Enabled {
		log.Fatal(4, "you should enable at least 1 input plugin")
	}

	sec := dur.MustParseUNsec("warm-up-period", *warmUpPeriodStr)
	warmupPeriod = time.Duration(sec) * time.Second

	chunkMaxStale := dur.MustParseUNsec("chunk-max-stale", *chunkMaxStaleStr)
	metricMaxStale := dur.MustParseUNsec("metric-max-stale", *metricMaxStaleStr)
	gcInterval := time.Duration(dur.MustParseUNsec("gc-interval", *gcIntervalStr)) * time.Second

	proftrigFreq := dur.MustParseUsec("proftrigger-freq", *proftrigFreqStr)
	proftrigMinDiff := int(dur.MustParseUNsec("proftrigger-min-diff", *proftrigMinDiffStr))
	if proftrigFreq > 0 {
		// errChan carries runtime errors from the trigger; drained forever by
		// the goroutine below. (renamed from "errors" to avoid shadowing the
		// stdlib package name.)
		errChan := make(chan error)
		trigger, err := heap.New(*proftrigPath, *proftrigHeapThresh, proftrigMinDiff, time.Duration(proftrigFreq)*time.Second, errChan)
		if err != nil {
			// previously this error was silently discarded, which could lead
			// to running a nil trigger below and panicking.
			log.Fatal(4, "failed to create profiletrigger: %s", err)
		}
		go func() {
			for e := range errChan {
				log.Error(0, "profiletrigger heap: %s", e)
			}
		}()
		go trigger.Run()
	}

	accountingPeriod := dur.MustParseUNsec("accounting-period", *accountingPeriodStr)

	/***********************************
		configure Profiling
	***********************************/
	runtime.SetBlockProfileRate(*blockProfileRate)
	runtime.MemProfileRate = *memProfileRate

	/************************************
		handle interrupt signals
	************************************/
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	/***********************************
		collect stats
	***********************************/
	statsConfig.Start()

	/***********************************
		Initialize our backendStore
	***********************************/
	store, err := mdata.NewCassandraStore(*cassandraAddrs, *cassandraKeyspace, *cassandraConsistency, *cassandraCaPath, *cassandraUsername, *cassandraPassword, *cassandraHostSelectionPolicy, *cassandraTimeout, *cassandraReadConcurrency, *cassandraWriteConcurrency, *cassandraReadQueueSize, *cassandraWriteQueueSize, *cassandraRetries, *cqlProtocolVersion, *cassandraWindowFactor, *cassandraSSL, *cassandraAuth, *cassandraHostVerification, mdata.TTLs())
	if err != nil {
		log.Fatal(4, "failed to initialize cassandra. %s", err)
	}

	/***********************************
		Initialize the Chunk Cache
	***********************************/
	ccache := cache.NewCCache()

	/***********************************
		Initialize our MemoryStore
	***********************************/
	metrics = mdata.NewAggMetrics(store, ccache, *dropFirstChunk, chunkMaxStale, metricMaxStale, gcInterval)

	/***********************************
		Initialize our Inputs
	***********************************/
	var inputs []input.Plugin
	// note. all these New functions must either return a valid instance or call log.Fatal
	if inCarbon.Enabled {
		inputs = append(inputs, inCarbon.New())
	}
	if inKafkaMdm.Enabled {
		sarama.Logger = l.New(os.Stdout, "[Sarama] ", l.LstdFlags)
		inputs = append(inputs, inKafkaMdm.New())
	}
	if cluster.Mode == cluster.ModeMulti && len(inputs) > 1 {
		log.Warn("It is not recommended to run a multinode cluster with more than 1 input plugin.")
	}

	/***********************************
		Start the ClusterManager
	***********************************/
	cluster.Start()

	/***********************************
		Initialize our MetricIdx
	***********************************/
	pre := time.Now()

	// exactly one index implementation may be enabled.
	if memory.Enabled {
		if metricIndex != nil {
			log.Fatal(4, "Only 1 metricIndex handler can be enabled.")
		}
		metricIndex = memory.New()
	}
	if cassandra.Enabled {
		if metricIndex != nil {
			log.Fatal(4, "Only 1 metricIndex handler can be enabled.")
		}
		metricIndex = cassandra.New()
	}
	if metricIndex == nil {
		log.Fatal(4, "No metricIndex handlers enabled.")
	}

	/***********************************
		Initialize our API server
	***********************************/
	apiServer, err := api.NewServer()
	if err != nil {
		log.Fatal(4, "Failed to start API. %s", err.Error())
	}
	apiServer.BindMetricIndex(metricIndex)
	apiServer.BindMemoryStore(metrics)
	apiServer.BindBackendStore(store)
	apiServer.BindCache(ccache)
	go apiServer.Run()

	/***********************************
		Load index entries from the backend store.
	***********************************/
	err = metricIndex.Init()
	if err != nil {
		log.Fatal(4, "failed to initialize metricIndex: %s", err)
	}
	log.Info("metricIndex initialized in %s. starting data consumption", time.Since(pre))

	/***********************************
		Initialize MetricPersist notifiers
	***********************************/
	var handlers []mdata.NotifierHandler
	if notifierKafka.Enabled {
		// The notifierKafka handler will block here until it has processed the backlog of metricPersist messages.
		// it will block for at most kafka-cluster.backlog-process-timeout (default 60s)
		handlers = append(handlers, notifierKafka.New(*instance, metrics, metricIndex))
	}
	if notifierNsq.Enabled {
		handlers = append(handlers, notifierNsq.New(*instance, metrics, metricIndex))
	}
	mdata.InitPersistNotifier(handlers...)

	/***********************************
		Initialize usage Reporting
	***********************************/
	usg := usage.New(accountingPeriod, metrics, metricIndex, clock.New())

	/***********************************
		Start our inputs
	***********************************/
	for _, plugin := range inputs {
		if carbonPlugin, ok := plugin.(*inCarbon.Carbon); ok {
			carbonPlugin.IntervalGetter(inCarbon.NewIndexIntervalGetter(metricIndex))
		}
		plugin.Start(input.NewDefaultHandler(metrics, metricIndex, usg, plugin.Name()))
		plugin.MaintainPriority()
	}

	// metric cluster.self.promotion_wait is how long a candidate (secondary node) has to wait until it can become a primary
	// When the timer becomes 0 it means the in-memory buffer has been able to fully populate so that if you stop a primary
	// and it was able to save its complete chunks, this node will be able to take over without dataloss.
	// You can upgrade a candidate to primary while the timer is not 0 yet, it just means it may have missing data in the chunks that it will save.
	maxChunkSpan := mdata.MaxChunkSpan()
	stats.NewTimeDiffReporter32("cluster.self.promotion_wait", (uint32(time.Now().Unix())/maxChunkSpan+1)*maxChunkSpan)

	/***********************************
		Set our status so we can accept
		requests from users.
	***********************************/
	if cluster.Manager.IsPrimary() {
		cluster.Manager.SetReady()
	} else {
		cluster.Manager.SetReadyIn(warmupPeriod)
	}

	/***********************************
		Wait for Shutdown
	***********************************/
	<-sigChan

	// Leave the cluster. All other nodes will be notified we have left
	// and so will stop sending us requests.
	cluster.Stop()

	// stop API
	apiServer.Stop()

	// shutdown our input plugins. These may take a while as we allow them
	// to finish processing any metrics that have already been ingested.
	timer := time.NewTimer(time.Second * 10)
	var wg sync.WaitGroup
	for _, plugin := range inputs {
		wg.Add(1)
		go func(plugin input.Plugin) {
			log.Info("Shutting down %s consumer", plugin.Name())
			plugin.Stop()
			log.Info("%s consumer finished shutdown", plugin.Name())
			wg.Done()
		}(plugin)
	}
	pluginsStopped := make(chan struct{})
	go func() {
		wg.Wait()
		close(pluginsStopped)
	}()
	// wait up to 10s for the plugins; a stuck plugin must not block shutdown forever.
	select {
	case <-timer.C:
		log.Warn("Plugins taking too long to shutdown, not waiting any longer.")
	case <-pluginsStopped:
		timer.Stop()
	}

	log.Info("closing store")
	store.Stop()
	metricIndex.Stop()
	log.Info("terminating.")
	log.Close()
}