Commit 6d808b86 authored by redzippo1, committed by Oliver

PR for Issues#219 (#220)

* Add slave_port attribute to metric "redis_connected_slave_offset"
parent c50e8531
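
For context: `INFO replication` reports each connected replica as a comma-separated list of `key=value` fields, and this change carries the `port` field through to the metric as a new `slave_port` label. Below is a minimal, self-contained sketch of that field extraction (illustrative only, not the exporter's own parseConnectedSlaveString shown in the diff); the sample line and expected values come from the test data in this commit:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample replica line from the diff's test data, as reported by INFO replication.
	slaveInfo := "ip=10.254.11.1,port=6379,state=online,offset=1751844676,lag=0"

	// Split the value into key=value pairs; the exporter keeps a similar map.
	fields := map[string]string{}
	for _, kv := range strings.Split(slaveInfo, ",") {
		parts := strings.Split(kv, "=")
		if len(parts) != 2 {
			continue
		}
		fields[parts[0]] = parts[1]
	}

	// With this change "port" joins ip/state/offset, so a scrape can expose e.g.
	//   redis_connected_slave_offset{...,slave_ip="10.254.11.1",slave_port="6379",slave_state="online"} 1.751844676e+09
	fmt.Println(fields["ip"], fields["port"], fields["state"], fields["offset"])
	// Output: 10.254.11.1 6379 online 1751844676
}
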
@@ -171,7 +171,7 @@ func (e *Exporter) initGauges() {
 		Namespace: e.namespace,
 		Name:      "connected_slave_offset",
 		Help:      "Offset of connected slave",
-	}, []string{"addr", "alias", "slave_ip", "slave_state"})
+	}, []string{"addr", "alias", "slave_ip", "slave_port", "slave_state"})
 	e.metrics["db_keys"] = prometheus.NewGaugeVec(prometheus.GaugeOpts{
 		Namespace: e.namespace,
 		Name:      "db_keys",
@@ -416,7 +416,7 @@ func parseDBKeyspaceString(db string, stats string) (keysTotal float64, keysExpi
 	slave0:ip=10.254.11.1,port=6379,state=online,offset=1751844676,lag=0
 	slave1:ip=10.254.11.2,port=6379,state=online,offset=1751844222,lag=0
 */
-func parseConnectedSlaveString(slaveName string, slaveInfo string) (offset float64, ip string, state string, ok bool) {
+func parseConnectedSlaveString(slaveName string, slaveInfo string) (offset float64, ip string, port string, state string, ok bool) {
 	ok = false
 	if matched, _ := regexp.MatchString(`^slave\d+`, slaveName); !matched {
 		return
@@ -437,6 +437,7 @@ func parseConnectedSlaveString(slaveName string, slaveInfo string) (offset float
 	}
 	ok = true
 	ip = connectedSlaveInfo["ip"]
+	port = connectedSlaveInfo["port"]
 	state = connectedSlaveInfo["state"]
 
 	return
@@ -472,8 +473,8 @@ func extractConfigMetrics(config []string, addr string, alias string, scrapes ch
 	return
 }
 
-func (e *Exporter) extractInfoMetrics(info, addr string, alias string, scrapes chan<- scrapeResult, dbCount int, padDBKeyCounts bool) error {
-	cmdstats := false
+func (e *Exporter) extractInfoMetrics(info, addr string, alias string, scrapes chan<- scrapeResult, dbCount int) error {
+	var fieldClass string
 	lines := strings.Split(info, "\r\n")
 
 	instanceInfo := map[string]string{}
@@ -482,14 +483,11 @@ func (e *Exporter) extractInfoMetrics(info, addr string, alias string, scrapes c
 	for _, line := range lines {
 		log.Debugf("info: %s", line)
 		if len(line) > 0 && line[0] == '#' {
-			if strings.Contains(line, "Commandstats") {
-				cmdstats = true
-			}
+			fieldClass = line[2:]
 			continue
 		}
 
 		if (len(line) < 2) || (!strings.Contains(line, ":")) {
-			cmdstats = false
 			continue
 		}
 
@@ -497,8 +495,10 @@ func (e *Exporter) extractInfoMetrics(info, addr string, alias string, scrapes c
 		if len(split) != 2 {
 			continue
 		}
 		fieldKey := split[0]
 		fieldValue := split[1]
 		if _, ok := instanceInfoFields[fieldKey]; ok {
 			instanceInfo[fieldKey] = fieldValue
 			continue
@@ -509,40 +509,44 @@ func (e *Exporter) extractInfoMetrics(info, addr string, alias string, scrapes c
 			continue
 		}
 
-		if fieldKey == "master_link_status" {
-			e.metricsMtx.RLock()
-			if fieldValue == "up" {
-				e.metrics["master_link_up"].WithLabelValues(addr, alias).Set(1)
-			} else {
-				e.metrics["master_link_up"].WithLabelValues(addr, alias).Set(0)
-			}
-			e.metricsMtx.RUnlock()
-			continue
-		}
-		if fieldKey == "uptime_in_seconds" {
-			if uptime, err := strconv.ParseFloat(fieldValue, 64); err == nil {
+		switch fieldClass {
+		case "Replication":
+			// only slave have this field
+			if fieldKey == "master_link_status" {
 				e.metricsMtx.RLock()
-				e.metrics["start_time_seconds"].WithLabelValues(addr, alias).Set(float64(time.Now().Unix()) - uptime)
+				if fieldValue == "up" {
+					e.metrics["master_link_up"].WithLabelValues(addr, alias).Set(1)
+				} else {
+					e.metrics["master_link_up"].WithLabelValues(addr, alias).Set(0)
+				}
 				e.metricsMtx.RUnlock()
+				continue
 			}
-		}
 
-		if slaveOffset, slaveIp, slaveState, ok := parseConnectedSlaveString(fieldKey, fieldValue); ok {
-			e.metricsMtx.RLock()
-			e.metrics["connected_slave_offset"].WithLabelValues(
-				addr,
-				alias,
-				slaveIp,
-				slaveState,
-			).Set(slaveOffset)
-			e.metricsMtx.RUnlock()
-		}
+			if slaveOffset, slaveIp, slavePort, slaveState, ok := parseConnectedSlaveString(fieldKey, fieldValue); ok {
+				e.metricsMtx.RLock()
+				e.metrics["connected_slave_offset"].WithLabelValues(
+					addr,
+					alias,
+					slaveIp,
+					slavePort,
+					slaveState,
+				).Set(slaveOffset)
+				e.metricsMtx.RUnlock()
+				continue
+			}
 
-		if !includeMetric(fieldKey) {
-			continue
-		}
+		case "Server":
+			if fieldKey == "uptime_in_seconds" {
+				if uptime, err := strconv.ParseFloat(fieldValue, 64); err == nil {
+					e.metricsMtx.RLock()
+					e.metrics["start_time_seconds"].WithLabelValues(addr, alias).Set(float64(time.Now().Unix()) - uptime)
+					e.metricsMtx.RUnlock()
+				}
+			}
 
-		if cmdstats {
+		case "Commandstats":
 			/*
 				Format:
 
@@ -577,54 +581,32 @@ func (e *Exporter) extractInfoMetrics(info, addr string, alias string, scrapes c
 			e.metrics["commands_duration_seconds_total"].WithLabelValues(addr, alias, cmd).Set(usecTotal / 1e6)
 			e.metricsMtx.RUnlock()
 			continue
-		}
 
-		if keysTotal, keysEx, avgTTL, ok := parseDBKeyspaceString(fieldKey, fieldValue); ok {
-			dbName := fieldKey
-			scrapes <- scrapeResult{Name: "db_keys", Addr: addr, Alias: alias, DB: dbName, Value: keysTotal}
-			scrapes <- scrapeResult{Name: "db_keys_expiring", Addr: addr, Alias: alias, DB: dbName, Value: keysEx}
-			if avgTTL > -1 {
-				scrapes <- scrapeResult{Name: "db_avg_ttl_seconds", Addr: addr, Alias: alias, DB: dbName, Value: avgTTL}
+		case "Keyspace":
+			if keysTotal, keysEx, avgTTL, ok := parseDBKeyspaceString(fieldKey, fieldValue); ok {
+				dbName := fieldKey
+				scrapes <- scrapeResult{Name: "db_keys", Addr: addr, Alias: alias, DB: dbName, Value: keysTotal}
+				scrapes <- scrapeResult{Name: "db_keys_expiring", Addr: addr, Alias: alias, DB: dbName, Value: keysEx}
+				if avgTTL > -1 {
+					scrapes <- scrapeResult{Name: "db_avg_ttl_seconds", Addr: addr, Alias: alias, DB: dbName, Value: avgTTL}
+				}
+				handledDBs[dbName] = true
+				continue
 			}
-			handledDBs[dbName] = true
-			continue
-		}
-		metricName := sanitizeMetricName(fieldKey)
-		if newName, ok := metricMap[metricName]; ok {
-			metricName = newName
-		}
 
-		var err error
-		var val float64
-		switch fieldValue {
-		case "ok":
-			val = 1
-		case "err", "fail":
-			val = 0
-		default:
-			val, err = strconv.ParseFloat(fieldValue, 64)
-		}
-		if err != nil {
-			log.Debugf("couldn't parse %s, err: %s", fieldValue, err)
+		if !includeMetric(fieldKey) {
 			continue
 		}
 
-		scrapes <- scrapeResult{Name: metricName, Addr: addr, Alias: alias, Value: val}
+		registerMetric(addr, alias, fieldKey, fieldValue, scrapes)
 	}
 
-	if padDBKeyCounts {
-		for dbIndex := 0; dbIndex < dbCount; dbIndex++ {
-			dbName := "db" + strconv.Itoa(dbIndex)
-			if _, exists := handledDBs[dbName]; !exists {
-				scrapes <- scrapeResult{Name: "db_keys", Addr: addr, Alias: alias, DB: dbName, Value: 0}
-				scrapes <- scrapeResult{Name: "db_keys_expiring", Addr: addr, Alias: alias, DB: dbName, Value: 0}
-			}
+	for dbIndex := 0; dbIndex < dbCount; dbIndex++ {
+		dbName := "db" + strconv.Itoa(dbIndex)
+		if _, exists := handledDBs[dbName]; !exists {
+			scrapes <- scrapeResult{Name: "db_keys", Addr: addr, Alias: alias, DB: dbName, Value: 0}
+			scrapes <- scrapeResult{Name: "db_keys_expiring", Addr: addr, Alias: alias, DB: dbName, Value: 0}
+		}
 	}
 
@@ -650,6 +632,59 @@ func (e *Exporter) extractInfoMetrics(info, addr string, alias string, scrapes c
 	return nil
 }
 
+func (e *Exporter) extractClusterInfoMetrics(info, addr, alias string, scrapes chan<- scrapeResult) error {
+	lines := strings.Split(info, "\r\n")
+	for _, line := range lines {
+		log.Debugf("info: %s", line)
+		split := strings.Split(line, ":")
+		if len(split) != 2 {
+			continue
+		}
+		fieldKey := split[0]
+		fieldValue := split[1]
+		if !includeMetric(fieldKey) {
+			continue
+		}
+		registerMetric(addr, alias, fieldKey, fieldValue, scrapes)
+	}
+	return nil
+}
+func registerMetric(addr, alias, fieldKey, fieldValue string, scrapes chan<- scrapeResult) error {
+	metricName := sanitizeMetricName(fieldKey)
+	if newName, ok := metricMap[metricName]; ok {
+		metricName = newName
+	}
+	var err error
+	var val float64
+	switch fieldValue {
+	case "ok":
+		val = 1
+	case "err", "fail":
+		val = 0
+	default:
+		val, err = strconv.ParseFloat(fieldValue, 64)
+	}
+	if err != nil {
+		log.Debugf("couldn't parse %s, err: %s", fieldValue, err)
+	}
+	scrapes <- scrapeResult{Name: metricName, Addr: addr, Alias: alias, Value: val}
+	return nil
+}
 func doRedisCmd(c redis.Conn, cmd string, args ...interface{}) (reply interface{}, err error) {
 	log.Debugf("c.Do() - running command: %s %s", cmd, args)
 	defer log.Debugf("c.Do() - done")
@@ -806,7 +841,7 @@ func (e *Exporter) scrapeRedisHost(scrapes chan<- scrapeResult, addr string, idx
 
 	if isClusterEnabled {
 		if clusterInfo, err := redis.String(doRedisCmd(c, "CLUSTER", "INFO")); err == nil {
-			e.extractInfoMetrics(clusterInfo, addr, e.redis.Aliases[idx], scrapes, dbCount, false)
+			e.extractClusterInfoMetrics(clusterInfo, addr, e.redis.Aliases[idx], scrapes)
 
 			// in cluster mode Redis only supports one database so no extra padding beyond that needed
 			dbCount = 1
@@ -821,7 +856,7 @@ func (e *Exporter) scrapeRedisHost(scrapes chan<- scrapeResult, addr string, idx
 		}
 	}
 
-	e.extractInfoMetrics(infoAll, addr, e.redis.Aliases[idx], scrapes, dbCount, true)
+	e.extractInfoMetrics(infoAll, addr, e.redis.Aliases[idx], scrapes, dbCount)
 
 	if reply, err := doRedisCmd(c, "LATENCY", "LATEST"); err == nil {
 		var eventName string
@@ -547,15 +547,15 @@ func TestKeyspaceStringParser(t *testing.T) {
 }
 
 type slaveData struct {
-	k, v      string
-	ip, state string
-	offset    float64
-	ok        bool
+	k, v            string
+	ip, state, port string
+	offset          float64
+	ok              bool
 }
 
 func TestParseConnectedSlaveString(t *testing.T) {
 	tsts := []slaveData{
-		{k: "slave0", v: "ip=10.254.11.1,port=6379,state=online,offset=1751844676,lag=0", offset: 1751844676, ip: "10.254.11.1", state: "online", ok: true},
+		{k: "slave0", v: "ip=10.254.11.1,port=6379,state=online,offset=1751844676,lag=0", offset: 1751844676, ip: "10.254.11.1", port: "6379", state: "online", ok: true},
 		{k: "slave1", v: "offset=1", offset: 1, ok: true},
 		{k: "slave2", v: "ip=1.2.3.4,state=online,offset=123", offset: 123, ip: "1.2.3.4", state: "online", ok: true},
 		{k: "slave", v: "offset=1751844676", ok: false},
@@ -564,15 +564,15 @@ func TestParseConnectedSlaveString(t *testing.T) {
 	}
 
 	for _, tst := range tsts {
-		if offset, ip, state, ok := parseConnectedSlaveString(tst.k, tst.v); true {
+		if offset, ip, port, state, ok := parseConnectedSlaveString(tst.k, tst.v); true {
 
 			if ok != tst.ok {
 				t.Errorf("failed for: db:%s stats:%s", tst.k, tst.v)
 				continue
 			}
 
-			if offset != tst.offset || ip != tst.ip || state != tst.state {
-				t.Errorf("values not matching, string:%s %f %s %s", tst.v, offset, ip, state)
+			if offset != tst.offset || ip != tst.ip || port != tst.port || state != tst.state {
+				t.Errorf("values not matching, string:%s %f %s %s %s", tst.v, offset, ip, port, state)
 			}
 		}
 	}