Add Prometheus metrics endpoint; containerize integration tests

Prometheus metrics (internal/metrics/, cmd/maglevd/)
- New --metrics-addr flag (default :9091, env MAGLEV_METRICS_ADDR)
  serving /metrics via promhttp.
- Gauge metrics scraped on demand via a custom prometheus.Collector:
  maglev_backend_state, maglev_backend_health, maglev_backend_enabled,
  maglev_frontend_pool_backend_weight.
- Inline counter/histogram metrics updated per probe:
  maglev_probe_total (by backend, type, result, code),
  maglev_probe_duration_seconds (by backend, type),
  maglev_backend_transitions_total (by backend, from, to).
- StateSource interface in metrics package breaks the import cycle
  with checker; checker.Checker satisfies it via GetBackendInfo.

Integration tests
- Run maglevd inside a containerlab node (debian:trixie-slim with
  build/ bind-mounted) instead of on the host. Eliminates port
  collisions with any host maglevd.
- maglevc commands run via docker exec into the maglevd container.
- Add 6 Prometheus test cases: endpoint reachable, all backends
  report state=up, probe counters non-zero, duration histogram
  populated, pool weights correct, transition counters present.
This commit is contained in:
2026-04-11 20:50:59 +02:00
parent 8bde00eb61
commit 4ab3096c8b
9 changed files with 311 additions and 18 deletions

View File

@@ -8,10 +8,13 @@ import (
"fmt"
"log/slog"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
@@ -19,6 +22,7 @@ import (
"git.ipng.ch/ipng/vpp-maglev/internal/checker"
"git.ipng.ch/ipng/vpp-maglev/internal/config"
"git.ipng.ch/ipng/vpp-maglev/internal/grpcapi"
"git.ipng.ch/ipng/vpp-maglev/internal/metrics"
)
func main() {
@@ -35,6 +39,7 @@ func run() error {
enableReflection := flag.Bool("reflection", true, "enable gRPC server reflection (for grpcurl)")
configPath := stringFlag("config", "/etc/vpp-maglev/maglev.yaml", "MAGLEV_CONFIG", "path to maglev.yaml")
grpcAddr := stringFlag("grpc-addr", ":9090", "MAGLEV_GRPC_ADDR", "gRPC listen address")
metricsAddr := stringFlag("metrics-addr", ":9091", "MAGLEV_METRICS_ADDR", "Prometheus /metrics listen address (empty to disable)")
logLevel := stringFlag("log-level", "info", "MAGLEV_LOG_LEVEL", "log level (debug|info|warn|error)")
flag.Parse()
@@ -106,6 +111,21 @@ func run() error {
}
}()
// ---- Prometheus metrics -------------------------------------------------
if *metricsAddr != "" {
reg := prometheus.DefaultRegisterer
metrics.Register(reg, chkr)
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
slog.Info("metrics-listening", "addr", *metricsAddr)
go func() {
if err := http.ListenAndServe(*metricsAddr, mux); err != nil && err != http.ErrServerClosed {
slog.Error("metrics-serve-error", "err", err)
}
}()
}
// ---- signal handling ----------------------------------------------------
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGINT)

View File

@@ -12,6 +12,7 @@ inspection and control.
|---|---|---|---|
| `--config` | `MAGLEV_CONFIG` | `/etc/vpp-maglev/maglev.yaml` | Path to the YAML configuration file. |
| `--grpc-addr` | `MAGLEV_GRPC_ADDR` | `:9090` | TCP address on which the gRPC server listens. |
| `--metrics-addr` | `MAGLEV_METRICS_ADDR` | `:9091` | TCP address for the Prometheus `/metrics` HTTP endpoint. Set to empty to disable. |
| `--log-level` | `MAGLEV_LOG_LEVEL` | `info` | Log verbosity: `debug`, `info`, `warn`, or `error`. |
| `--check` | — | — | Read and validate the config file, then exit. Exits 0 if the config is valid, 1 on YAML parse error, 2 on semantic error. |
| `--reflection` | — | `true` | Enable gRPC server reflection. Allows `grpcurl` to introspect the API without the `.proto` file. Set to `false` to disable. |

8
go.mod
View File

@@ -13,6 +13,14 @@ require (
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
golang.org/x/text v0.35.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516 // indirect
)

14
go.sum
View File

@@ -1,3 +1,5 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
@@ -16,6 +18,16 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
@@ -30,6 +42,8 @@ go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2W
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -13,6 +13,7 @@ import (
"git.ipng.ch/ipng/vpp-maglev/internal/config"
"git.ipng.ch/ipng/vpp-maglev/internal/health"
"git.ipng.ch/ipng/vpp-maglev/internal/metrics"
"git.ipng.ch/ipng/vpp-maglev/internal/prober"
)
@@ -267,6 +268,22 @@ func (c *Checker) GetBackend(name string) (BackendSnapshot, bool) {
return BackendSnapshot{Health: w.backend, Config: w.entry}, true
}
// GetBackendInfo returns the health state and key config fields for a backend.
// Satisfies metrics.StateSource.
func (c *Checker) GetBackendInfo(name string) (metrics.BackendInfo, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	worker, found := c.workers[name]
	if !found {
		return metrics.BackendInfo{}, false
	}
	info := metrics.BackendInfo{
		Health:  worker.backend,
		Enabled: worker.entry.Enabled,
		HCName:  worker.entry.HealthCheck,
	}
	return info, true
}
// PauseBackend pauses health checking for a backend by name. The probe
// goroutine is cancelled so no further traffic is sent to the backend. The
// backend's state is set to paused and remains frozen until ResumeBackend is
@@ -466,6 +483,7 @@ func (c *Checker) runProbe(ctx context.Context, name string, pos, total int) {
slog.Debug("probe-start", "backend", name, "type", hc.Type)
start := time.Now()
result = prober.ForType(hc.Type)(probeCtx, pcfg)
elapsed := time.Since(start)
cancel()
slog.Debug("probe-done",
"backend", name,
@@ -473,8 +491,14 @@ func (c *Checker) runProbe(ctx context.Context, name string, pos, total int) {
"ok", result.OK,
"code", result.Code,
"detail", result.Detail,
"elapsed", time.Since(start).Round(time.Millisecond).String(),
"elapsed", elapsed.Round(time.Millisecond).String(),
)
res := "success"
if !result.OK {
res = "failure"
}
metrics.ProbeTotal.WithLabelValues(name, hc.Type, res, result.Code).Inc()
metrics.ProbeDuration.WithLabelValues(name, hc.Type).Observe(elapsed.Seconds())
}
c.mu.Lock()
@@ -493,6 +517,7 @@ func (c *Checker) runProbe(ctx context.Context, name string, pos, total int) {
"code", result.Code,
"detail", result.Detail,
)
metrics.TransitionTotal.WithLabelValues(name, t.From.String(), t.To.String()).Inc()
c.emitForBackend(name, addr, t, c.cfg.Frontends)
}
c.mu.Unlock()

176
internal/metrics/metrics.go Normal file
View File

@@ -0,0 +1,176 @@
// Copyright (c) 2026, Pim van Pelt <pim@ipng.ch>
// Package metrics exposes Prometheus metrics for maglevd.
//
// Gauge-type metrics (backend state, health counter, weights) are collected
// on demand when Prometheus scrapes /metrics via the Collector. Counter and
// histogram metrics (probe totals, probe duration, transitions) are updated
// inline from the probe loop.
package metrics
import (
"git.ipng.ch/ipng/vpp-maglev/internal/config"
"git.ipng.ch/ipng/vpp-maglev/internal/health"
"github.com/prometheus/client_golang/prometheus"
)
// BackendInfo holds the health and config state needed by the collector.
// It is a snapshot view handed out by a StateSource implementation.
type BackendInfo struct {
	Health  *health.Backend // live health record (state, counter, address) owned by the checker
	Enabled bool            // whether the backend is enabled in config
	HCName  string          // healthcheck name from config
}
// StateSource provides read-only access to the running checker state.
//
// It is declared here (instead of importing the checker package) to break
// the import cycle: checker imports metrics to update the inline probe
// counters. checker.Checker satisfies this interface.
type StateSource interface {
	// ListBackends returns the names of all known backends.
	ListBackends() []string
	// GetBackendInfo returns the health/config snapshot for one backend;
	// the bool is false when the name is unknown.
	GetBackendInfo(name string) (BackendInfo, bool)
	// ListFrontends returns the names of all configured frontends.
	ListFrontends() []string
	// GetFrontend returns the frontend config by name; the bool is false
	// when the name is unknown.
	GetFrontend(name string) (config.Frontend, bool)
}
// ---- inline metrics (updated per probe) ------------------------------------

var (
	// ProbeTotal counts every health-check probe executed, labelled by
	// backend name, probe type, result ("success"/"failure") and the
	// probe's result code.
	ProbeTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "maglev",
		Subsystem: "probe",
		Name:      "total",
		Help:      "Total number of health-check probes executed.",
	}, []string{"backend", "type", "result", "code"})

	// ProbeDuration observes how long each probe took, labelled by backend
	// and probe type. Buckets cover 1ms up to 2.5s.
	ProbeDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "maglev",
		Subsystem: "probe",
		Name:      "duration_seconds",
		Help:      "Health-check probe duration in seconds.",
		Buckets:   []float64{.001, .0025, .005, .01, .025, .05, .1, .25, .5, 1, 2.5},
	}, []string{"backend", "type"})

	// TransitionTotal counts backend state transitions, labelled by backend
	// name and the from/to state names.
	TransitionTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "maglev",
		Subsystem: "backend",
		Name:      "transitions_total",
		Help:      "Total number of backend state transitions.",
	}, []string{"backend", "from", "to"})
)
// ---- collector (scraped on demand) -----------------------------------------

// Collector implements prometheus.Collector by querying the running checker
// on each scrape. This avoids stale label sets when backends are added or
// removed by a config reload.
type Collector struct {
	src StateSource // live checker state, queried on every scrape

	backendState   *prometheus.Desc // maglev_backend_state (one series per possible state)
	backendHealth  *prometheus.Desc // maglev_backend_health
	backendEnabled *prometheus.Desc // maglev_backend_enabled
	poolWeight     *prometheus.Desc // maglev_frontend_pool_backend_weight
}
// NewCollector creates a Collector backed by the given StateSource and
// pre-builds the metric descriptors it will emit on each scrape.
func NewCollector(src StateSource) *Collector {
	c := &Collector{src: src}
	c.backendState = prometheus.NewDesc(
		"maglev_backend_state",
		"Current backend state (1 = active for the given state label).",
		[]string{"backend", "address", "healthcheck", "state"}, nil,
	)
	c.backendHealth = prometheus.NewDesc(
		"maglev_backend_health",
		"Current health counter value.",
		[]string{"backend"}, nil,
	)
	c.backendEnabled = prometheus.NewDesc(
		"maglev_backend_enabled",
		"Whether the backend is enabled (1) or disabled (0).",
		[]string{"backend"}, nil,
	)
	c.poolWeight = prometheus.NewDesc(
		"maglev_frontend_pool_backend_weight",
		"Configured weight of a backend in a frontend pool (0-100).",
		[]string{"frontend", "pool", "backend"}, nil,
	)
	return c
}
// Describe implements prometheus.Collector by sending every metric
// descriptor this collector can emit to ch.
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
	descs := []*prometheus.Desc{
		c.backendState,
		c.backendHealth,
		c.backendEnabled,
		c.poolWeight,
	}
	for _, d := range descs {
		ch <- d
	}
}
// Collect implements prometheus.Collector. It snapshots the checker state
// at scrape time and emits per-backend state/health/enabled gauges plus
// per-pool backend weight gauges.
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
	boolGauge := func(b bool) float64 {
		if b {
			return 1.0
		}
		return 0.0
	}
	allStates := []health.State{
		health.StateUnknown,
		health.StateUp,
		health.StateDown,
		health.StatePaused,
		health.StateRemoved,
	}
	for _, backend := range c.src.ListBackends() {
		info, found := c.src.GetBackendInfo(backend)
		if !found {
			continue
		}
		address := info.Health.Address.String()
		// One time-series per possible state; the current state is 1, rest 0.
		for _, st := range allStates {
			ch <- prometheus.MustNewConstMetric(
				c.backendState, prometheus.GaugeValue,
				boolGauge(info.Health.State == st),
				backend, address, info.HCName, st.String(),
			)
		}
		ch <- prometheus.MustNewConstMetric(
			c.backendHealth, prometheus.GaugeValue,
			float64(info.Health.Counter.Health), backend,
		)
		ch <- prometheus.MustNewConstMetric(
			c.backendEnabled, prometheus.GaugeValue,
			boolGauge(info.Enabled), backend,
		)
	}
	for _, feName := range c.src.ListFrontends() {
		frontend, found := c.src.GetFrontend(feName)
		if !found {
			continue
		}
		for _, pool := range frontend.Pools {
			for beName, member := range pool.Backends {
				ch <- prometheus.MustNewConstMetric(
					c.poolWeight, prometheus.GaugeValue,
					float64(member.Weight), feName, pool.Name, beName,
				)
			}
		}
	}
}
// Register registers the on-demand collector plus the inline probe and
// transition metrics with the given registry, and returns the collector.
func Register(reg prometheus.Registerer, src StateSource) *Collector {
	coll := NewCollector(src)
	// MustRegister is variadic; register everything in one call.
	reg.MustRegister(coll, ProbeTotal, ProbeDuration, TransitionTotal)
	return coll
}

View File

@@ -1,6 +1,5 @@
*** Settings ***
Library OperatingSystem
Library Process
Resource ../common.robot
Suite Setup Setup Suite
@@ -10,26 +9,20 @@ Suite Teardown Cleanup Suite
*** Variables ***
${lab-name} maglevd-test
${lab-file} maglevd-lab/maglevd.clab.yml
${config-file} maglevd-lab/maglev.yaml
${runtime} docker
${GRPC_PORT} 9091
${MAGLEVD_NODE} clab-maglevd-test-maglevd
${METRICS_URL} http://172.20.30.2:9091/metrics
*** Test Cases ***
Deploy maglevd-test lab
[Documentation] Deploy the containerlab topology. The maglevd node starts
... automatically as PID 1 via start.sh and begins probing the nginx
... backends immediately.
${rc} ${output} = Run And Return Rc And Output
... ${CLAB_BIN} --runtime ${runtime} deploy -t ${CURDIR}/${lab-file}
Log ${output}
Should Be Equal As Integers ${rc} 0
Start maglevd
${handle} = Start Process ${MAGLEVD}
... --config ${CURDIR}/${config-file}
... --grpc-addr :${GRPC_PORT}
... --log-level debug
... alias=maglevd stdout=${EXECDIR}/tests/out/maglevd.log
... stderr=STDOUT
Set Suite Variable ${MAGLEVD_HANDLE} ${handle}
Sleep 3s Wait for nginx containers and probes to converge
All backends reach up state
@@ -86,22 +79,55 @@ Enable backend restarts probing
... Backend Should Be Up nginx2
Prometheus endpoint is reachable
${rc} ${output} = Run And Return Rc And Output
... curl -sf ${METRICS_URL}
Log ${output}
Should Be Equal As Integers ${rc} 0
Should Contain ${output} maglev_backend_state
Prometheus reports all backends up
${output} = Scrape Metrics
# Each backend should have state="up" = 1.
Should Contain ${output} maglev_backend_state{address="172.20.30.11",backend="nginx1",healthcheck="http-check",state="up"} 1
Should Contain ${output} maglev_backend_state{address="172.20.30.12",backend="nginx2",healthcheck="http-check",state="up"} 1
Should Contain ${output} maglev_backend_state{address="172.20.30.13",backend="nginx3",healthcheck="http-check",state="up"} 1
Prometheus reports probe counters
${output} = Scrape Metrics
Should Match Regexp ${output} maglev_probe_total\\{backend="nginx1".*result="success".*\\}\\s+[1-9]
Should Match Regexp ${output} maglev_probe_total\\{backend="nginx2".*result="success".*\\}\\s+[1-9]
Should Match Regexp ${output} maglev_probe_total\\{backend="nginx3".*result="success".*\\}\\s+[1-9]
Prometheus reports probe duration histogram
${output} = Scrape Metrics
Should Match Regexp ${output} maglev_probe_duration_seconds_count\\{backend="nginx1".*\\}\\s+[1-9]
Prometheus reports pool weights
${output} = Scrape Metrics
Should Contain ${output} maglev_frontend_pool_backend_weight{backend="nginx1",frontend="http-vip",pool="primary"} 100
Should Contain ${output} maglev_frontend_pool_backend_weight{backend="nginx3",frontend="http-vip",pool="fallback"} 100
Prometheus reports transition counters
${output} = Scrape Metrics
# All backends transitioned unknown → up during startup.
Should Match Regexp ${output} maglev_backend_transitions_total\\{backend="nginx1",from="unknown",to="up"\\}\\s+[1-9]
*** Keywords ***
Setup Suite
${arch} = Run go env GOARCH
Set Suite Variable ${ARCH} ${arch}
Set Suite Variable ${MAGLEVD} ${EXECDIR}/build/${ARCH}/maglevd
Set Suite Variable ${MAGLEVC} ${EXECDIR}/build/${ARCH}/maglevc
Cleanup Suite
Run Keyword And Ignore Error Terminate Process maglevd kill=true
Run docker logs ${MAGLEVD_NODE} > ${EXECDIR}/tests/out/maglevd.log 2>&1
Run ${CLAB_BIN} --runtime ${runtime} destroy -t ${CURDIR}/${lab-file} --cleanup
Maglevc
[Documentation] Run a maglevc command and return its output.
[Documentation] Run a maglevc command inside the maglevd container.
[Arguments] ${cmd}
${rc} ${output} = Run And Return Rc And Output
... ${MAGLEVC} --server\=localhost:${GRPC_PORT} --color\=false ${cmd}
... docker exec ${MAGLEVD_NODE} /opt/maglev/build/${ARCH}/maglevc --color\=false ${cmd}
Log ${output}
Should Be Equal As Integers ${rc} 0
RETURN ${output}
@@ -133,3 +159,10 @@ Probe Count Should Be Positive
${count} = Get Probe Count ${name}
Should Be True ${count} > 0
... No health-check requests found in nginx logs for ${name}
Scrape Metrics
[Documentation] Fetch the Prometheus /metrics endpoint from the maglevd container.
${rc} ${output} = Run And Return Rc And Output
... curl -sf ${METRICS_URL}
Should Be Equal As Integers ${rc} 0
RETURN ${output}

View File

@@ -6,6 +6,15 @@ mgmt:
topology:
nodes:
maglevd:
kind: linux
image: debian:trixie-slim
mgmt-ipv4: 172.20.30.2
binds:
- ../../../build:/opt/maglev/build:ro
- ./maglev.yaml:/etc/maglev/maglev.yaml:ro
- ./start.sh:/start.sh:ro
cmd: /start.sh
nginx1:
kind: linux
image: nginx:alpine

View File

@@ -0,0 +1,7 @@
#!/bin/sh
# Container entrypoint: map the kernel architecture (uname -m) to the GOARCH
# name used by the build tree, then exec maglevd as PID 1 so it receives
# container signals directly.
set -eu
ARCH=$(uname -m)
case "$ARCH" in
x86_64) ARCH=amd64 ;;
aarch64) ARCH=arm64 ;;
*)
    # Fail loudly instead of exec-ing a nonexistent build path.
    echo "start.sh: unsupported architecture: $ARCH" >&2
    exit 1
    ;;
esac
exec /opt/maglev/build/${ARCH}/maglevd --config /etc/maglev/maglev.yaml --log-level debug