Files
vpp-maglev/internal/vpp/warmup_test.go
Pim van Pelt 6d78921edd Restart-neutral VPP LB sync; deterministic AS ordering; maglevt cadence; v0.9.5
Three reliability fixes bundled with docs updates.

Restart-neutral VPP LB sync via a startup warmup window
(internal/vpp/warmup.go). Before this, a maglevd restart would
immediately issue SyncLBStateAll with every backend still in
StateUnknown — mapped through BackendEffectiveWeight to weight
0 — and VPP would black-hole all new flows until the checker's
rise counters caught up, several seconds later. The new warmup
tracker owns a process-wide state machine gated by two config
knobs: vpp.lb.startup-min-delay (default 5s) is an absolute
hands-off window during which neither the periodic sync loop
nor the per-transition reconciler touches VPP; vpp.lb.
startup-max-delay (default 30s) is the watchdog for a per-VIP
release phase that runs between the two, releasing each frontend
as soon as every backend it references reaches a non-Unknown
state. At max-delay a final SyncLBStateAll runs for any stragglers
still in Unknown. Config reload does not reset the clock. Both
delays can be set to 0 to disable the warmup entirely. The
reconciler's suppressed-during-warmup events log at DEBUG so
operators can still see them with --log-level debug. Unit tests
cover the tracker state machine, the allBackendsKnown precondition,
and the zero-delay escape hatch.
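
For orientation, the per-VIP release decision the tests below exercise
composes roughly like this. This is a sketch, not the real reconciler:
the frontends argument and the syncVIP callback are placeholders, while
warmupTracker, StateSource and allBackendsKnown are the symbols that
warmup_test.go pins down.

// Sketch of the release pass that runs between min-delay and max-delay.
// frontends and syncVIP are illustrative placeholders only.
func releasePass(w *warmupTracker, src StateSource,
	frontends map[string]config.Frontend, syncVIP func(string, config.Frontend)) {
	if w.inMinDelay() || w.isAllDone() {
		return // hands-off window, or warmup already over
	}
	for name, fe := range frontends {
		if w.isReleased(name) {
			continue
		}
		if allBackendsKnown(fe, src) && w.tryRelease(name) {
			syncVIP(name, fe) // first programming of this VIP since the restart
		}
	}
}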

Deterministic AS iteration in VPP LB sync. reconcileVIP and
recreateVIP now issue their lb_as_add_del / lb_as_set_weight
calls in numeric IP order (IPv4 before IPv6, ascending within
each family) via a new sortedIPKeys helper, instead of Go map
iteration order. VPP's LB plugin breaks per-bucket ties in the
Maglev lookup table by insertion position in its internal AS
vec, so without a stable call order two maglevd instances on
the same config could push identical AS sets into VPP in
different orders and produce divergent new-flow tables. Numeric
sort is used in preference to lexicographic so the sync log
stays human-readable: string order would place 10.0.0.10 before
10.0.0.2, and analogously for v6. Unit tests cover empty,
single, v4/v6 numeric vs lexicographic, v4-before-v6 grouping,
a 1000-iteration stability loop against Go's randomised map
iteration, insertion-order invariance, and the desiredAS
call-site type.
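
A minimal sketch of that ordering, assuming the keys are textual IPs
(the real sortedIPKeys helper's signature and map value type may
differ):

// sortIPStrings orders textual IPs the way sortedIPKeys is described
// to: IPv4 before IPv6, ascending numerically within each family.
// Hypothetical name; needs "bytes", "net" and "sort".
func sortIPStrings(ips []string) {
	sort.Slice(ips, func(i, j int) bool {
		a, b := net.ParseIP(ips[i]), net.ParseIP(ips[j])
		a4, b4 := a.To4() != nil, b.To4() != nil
		if a4 != b4 {
			return a4 // IPv4 sorts before IPv6
		}
		return bytes.Compare(a.To16(), b.To16()) < 0 // numeric, not lexicographic
	})
}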

maglevt interval fix. runProbeLoop used to sleep the full
jittered interval after every probe, so a 100ms --interval
with a 30ms probe actually produced a 130ms period. The sleep
now subtracts result.Duration so cadence matches the flag.
Probes that overrun clamp sleep to zero and fire the next
probe immediately without trying to catch up on missed cycles
— a slow backend doesn't get flooded with back-to-back probes
at the moment it's already struggling.
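
The cadence arithmetic reduces to this helper (nextSleep is a
hypothetical name for illustration; the actual change subtracts
result.Duration inline in runProbeLoop):

// nextSleep returns how long to sleep after a probe so the overall
// period matches the jittered --interval. A probe that takes longer
// than the interval clamps to zero: the next probe fires immediately,
// but no catch-up probes are queued for the cycles that were missed.
func nextSleep(jitteredInterval, probeDuration time.Duration) time.Duration {
	if sleep := jitteredInterval - probeDuration; sleep > 0 {
		return sleep
	}
	return 0
}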

Docs. config-guide now documents flush-on-down and the new
startup-min-delay / startup-max-delay knobs; user-guide's
maglevd section explains the restart-neutrality property, the
three warmup phases, and the relevant slog lines operators
should watch for during a bounce.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-15 12:53:42 +02:00

233 lines
6.9 KiB
Go

// Copyright (c) 2026, Pim van Pelt <pim@ipng.ch>

package vpp

import (
	"net"
	"testing"
	"time"

	"git.ipng.ch/ipng/vpp-maglev/internal/config"
	"git.ipng.ch/ipng/vpp-maglev/internal/health"
)

// TestWarmupTrackerBasic pins the state-machine transitions the tracker
// owns: inMinDelay, isReleased, tryRelease, finishAll, isAllDone. Time
// is manipulated by backdating startAt — there is no hidden global
// clock, so a test can assert behaviour at any simulated point in
// the [0, maxDelay] window by rewinding startAt before each step.
func TestWarmupTrackerBasic(t *testing.T) {
	t.Run("min-delay gates everything", func(t *testing.T) {
		w := newWarmupTracker()
		w.configure(5*time.Second, 30*time.Second)

		// At t=0 we are inside min-delay.
		if !w.inMinDelay() {
			t.Error("t=0: expected inMinDelay=true")
		}
		if w.isReleased("fe1") {
			t.Error("t=0: expected isReleased(fe1)=false")
		}
		if w.tryRelease("fe1") {
			t.Error("t=0: tryRelease should fail inside min-delay")
		}
		if w.isAllDone() {
			t.Error("t=0: expected isAllDone=false")
		}
	})

	t.Run("per-VIP release after min-delay", func(t *testing.T) {
		w := newWarmupTracker()
		w.configure(5*time.Second, 30*time.Second)

		// Simulate t=10s by backdating startAt 10s into the past.
		w.startAt = time.Now().Add(-10 * time.Second)
		if w.inMinDelay() {
			t.Error("t=10s: expected inMinDelay=false")
		}
		if w.isReleased("fe1") {
			t.Error("t=10s: fe1 should not be released yet")
		}
		if !w.tryRelease("fe1") {
			t.Error("t=10s: tryRelease(fe1) should succeed")
		}
		if !w.isReleased("fe1") {
			t.Error("after tryRelease: isReleased(fe1) should be true")
		}

		// A second call returns true (already-released path).
		if !w.tryRelease("fe1") {
			t.Error("second tryRelease(fe1) should return true (already released)")
		}

		// Other VIPs are independent.
		if w.isReleased("fe2") {
			t.Error("releasing fe1 should not affect fe2")
		}
	})

	t.Run("finishAll opens all gates", func(t *testing.T) {
		w := newWarmupTracker()
		w.configure(5*time.Second, 30*time.Second)

		// Inside min-delay: fe1 not released.
		if w.isReleased("fe1") {
			t.Error("pre-finishAll: fe1 should not be released")
		}

		w.finishAll()
		if !w.isAllDone() {
			t.Error("post-finishAll: isAllDone should be true")
		}
		if !w.isReleased("fe1") {
			t.Error("post-finishAll: every frontend should be released")
		}
		if w.inMinDelay() {
			t.Error("post-finishAll: inMinDelay should be false")
		}

		// Second call is idempotent.
		w.finishAll()
	})

	t.Run("doneChan closes on finishAll", func(t *testing.T) {
		w := newWarmupTracker()
		w.configure(5*time.Second, 30*time.Second)

		select {
		case <-w.doneChan():
			t.Fatal("doneChan should not be readable before finishAll")
		default:
		}

		w.finishAll()
		select {
		case <-w.doneChan():
			// expected
		case <-time.After(100 * time.Millisecond):
			t.Fatal("doneChan should be readable after finishAll")
		}
	})

	t.Run("configure is idempotent after first call", func(t *testing.T) {
		w := newWarmupTracker()
		w.configure(5*time.Second, 30*time.Second)

		// Reload with shorter delays should be a no-op so a config
		// reload mid-warmup doesn't move the goalposts.
		w.configure(1*time.Second, 2*time.Second)
		if w.minDelay != 5*time.Second {
			t.Errorf("minDelay got %v, want %v", w.minDelay, 5*time.Second)
		}
		if w.maxDelay != 30*time.Second {
			t.Errorf("maxDelay got %v, want %v", w.maxDelay, 30*time.Second)
		}
	})
}

// staticStateSource is a minimal StateSource for allBackendsKnown tests.
// It holds a fixed config and a static backend-state map; BackendState
// returns (state, true) for entries in the map and (StateUnknown, false)
// for everything else — matching how checker.Checker would report a
// backend that isn't under its watch.
type staticStateSource struct {
	cfg    *config.Config
	states map[string]health.State
}

func (s *staticStateSource) Config() *config.Config { return s.cfg }

func (s *staticStateSource) BackendState(name string) (health.State, bool) {
	st, ok := s.states[name]
	return st, ok
}

// TestAllBackendsKnown pins the per-VIP release precondition. A
// frontend is eligible for release during the warmup phase iff every
// backend it references has reached a non-Unknown state. StateDown,
// StatePaused, and StateDisabled all count as "known" — the property
// is "has the checker reported at least once", not "is healthy".
func TestAllBackendsKnown(t *testing.T) {
	ip := func(s string) net.IP { return net.ParseIP(s) }
	fe := config.Frontend{
		Address:  ip("192.0.2.1"),
		Protocol: "tcp",
		Port:     80,
		Pools: []config.Pool{
			{Name: "primary", Backends: map[string]config.PoolBackend{
				"b1": {Weight: 100},
				"b2": {Weight: 100},
			}},
			{Name: "fallback", Backends: map[string]config.PoolBackend{
				"b3": {Weight: 100},
			}},
		},
	}

	cases := []struct {
		name   string
		states map[string]health.State
		want   bool
	}{
		{
			name: "all up → known",
			states: map[string]health.State{
				"b1": health.StateUp, "b2": health.StateUp, "b3": health.StateUp,
			},
			want: true,
		},
		{
			name: "mixed down/disabled/paused → known",
			// All of up, down, paused, and disabled are "non-Unknown" and
			// therefore count as known; allBackendsKnown is about
			// "has the checker reported once", not about health.
			states: map[string]health.State{
				"b1": health.StateDown, "b2": health.StateDisabled, "b3": health.StatePaused,
			},
			want: true,
		},
		{
			name: "one still unknown → not known",
			states: map[string]health.State{
				"b1": health.StateUp, "b2": health.StateUnknown, "b3": health.StateUp,
			},
			want: false,
		},
		{
			name:   "backend not reported at all (checker doesn't know it) → not known",
			states: map[string]health.State{"b1": health.StateUp, "b2": health.StateUp},
			want:   false,
		},
		{
			name: "fallback pool unknown → not known",
			states: map[string]health.State{
				"b1": health.StateUp, "b2": health.StateUp, "b3": health.StateUnknown,
			},
			want: false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			src := &staticStateSource{cfg: &config.Config{}, states: tc.states}
			got := allBackendsKnown(fe, src)
			if got != tc.want {
				t.Errorf("got %v, want %v", got, tc.want)
			}
		})
	}
}

// TestWarmupTrackerZeroDelays pins the "warmup disabled" escape hatch:
// with both delays set to 0, tryRelease succeeds immediately and
// isReleased returns true for every frontend without needing any
// state transitions. This is the configuration an operator picks
// when they'd rather take the brief startup black-hole than wait
// out the warmup — typically for tests and dev setups.
func TestWarmupTrackerZeroDelays(t *testing.T) {
	w := newWarmupTracker()
	w.configure(0, 0)

	if w.inMinDelay() {
		t.Error("min=0: expected inMinDelay=false immediately")
	}
	if !w.tryRelease("fe1") {
		t.Error("min=0: tryRelease should succeed immediately")
	}
	if !w.isReleased("fe1") {
		t.Error("min=0: isReleased(fe1) should be true after tryRelease")
	}
}