Frontend flush-on-down policy; v0.9.3

Adds a per-frontend flush-on-down flag (default true) that causes
maglevd to set is_flush=true on lb_as_set_weight when a backend
transitions to StateDown, tearing down existing flows pinned to
the dead AS instead of just draining them. Rise/fall debouncing
in the health checker already absorbs single-probe flaps, so a
fall-counted down is almost always a real outage — and during a
real outage the client-visible "connection refused" oscillation
window (where VPP keeps steering existing flows at a dead AS
until retry) is a reliability regression worth closing by default.
Operators who want the pre-flag drain-only behaviour can set
flush-on-down: false per frontend.

BackendEffectiveWeight's truth table grows one axis: StateDown
now returns (0, flushOnDown); StateDisabled still unconditionally
flushes; StateUnknown / StatePaused still never flush. The unit
test pins all four StateDown/StatePaused × flushOnDown combinations.

The flag surfaces in the gRPC FrontendInfo message and in
`maglevc show frontend <name>` right next to src-ip-sticky.
This commit is contained in:
2026-04-15 01:42:46 +02:00
parent 6293521157
commit 6b2b04b2d1
9 changed files with 78 additions and 36 deletions

View File

@@ -14,41 +14,48 @@ import (
// the behavior has deliberately changed.
func TestBackendEffectiveWeight(t *testing.T) {
cases := []struct {
name string
poolIdx int
activePool int
state State
cfgWeight int
wantWeight uint8
wantFlush bool
name string
poolIdx int
activePool int
state State
cfgWeight int
flushOnDown bool
wantWeight uint8
wantFlush bool
}{
{"up active w100", 0, 0, StateUp, 100, 100, false},
{"up active w50", 0, 0, StateUp, 50, 50, false},
{"up active w0", 0, 0, StateUp, 0, 0, false},
{"up active clamp-high", 0, 0, StateUp, 150, 100, false},
{"up active clamp-low", 0, 0, StateUp, -5, 0, false},
{"up active w100", 0, 0, StateUp, 100, true, 100, false},
{"up active w50", 0, 0, StateUp, 50, true, 50, false},
{"up active w0", 0, 0, StateUp, 0, true, 0, false},
{"up active clamp-high", 0, 0, StateUp, 150, true, 100, false},
{"up active clamp-low", 0, 0, StateUp, -5, true, 0, false},
{"up standby pool0 active=1", 0, 1, StateUp, 100, 0, false},
{"up standby pool1 active=0", 1, 0, StateUp, 100, 0, false},
{"up standby pool2 active=0", 2, 0, StateUp, 100, 0, false},
{"up standby pool0 active=1", 0, 1, StateUp, 100, true, 0, false},
{"up standby pool1 active=0", 1, 0, StateUp, 100, true, 0, false},
{"up standby pool2 active=0", 2, 0, StateUp, 100, true, 0, false},
{"up failover pool1 active=1", 1, 1, StateUp, 100, 100, false},
{"up failover pool1 active=1", 1, 1, StateUp, 100, true, 100, false},
{"unknown pool0 active=0", 0, 0, StateUnknown, 100, 0, false},
{"unknown pool1 active=0", 1, 0, StateUnknown, 100, 0, false},
{"unknown pool0 active=0", 0, 0, StateUnknown, 100, true, 0, false},
{"unknown pool1 active=0", 1, 0, StateUnknown, 100, true, 0, false},
{"down pool0 active=0", 0, 0, StateDown, 100, 0, false},
{"down pool1 active=1", 1, 1, StateDown, 100, 0, false},
// flush-on-down policy: default true flushes, explicit false drains.
{"down pool0 flushOnDown=true", 0, 0, StateDown, 100, true, 0, true},
{"down pool1 flushOnDown=true", 1, 1, StateDown, 100, true, 0, true},
{"down pool0 flushOnDown=false", 0, 0, StateDown, 100, false, 0, false},
{"down pool1 flushOnDown=false", 1, 1, StateDown, 100, false, 0, false},
{"paused pool0 active=0", 0, 0, StatePaused, 100, 0, false},
// paused never flushes — drain-don't-kill is the whole point.
{"paused pool0 flushOnDown=true", 0, 0, StatePaused, 100, true, 0, false},
{"paused pool0 flushOnDown=false", 0, 0, StatePaused, 100, false, 0, false},
{"disabled pool0 active=0", 0, 0, StateDisabled, 100, 0, true},
{"disabled pool1 active=1", 1, 1, StateDisabled, 100, 0, true},
// disabled always flushes regardless of flush-on-down policy.
{"disabled pool0 flushOnDown=true", 0, 0, StateDisabled, 100, true, 0, true},
{"disabled pool1 flushOnDown=false", 1, 1, StateDisabled, 100, false, 0, true},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight)
w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight, tc.flushOnDown)
if w != tc.wantWeight {
t.Errorf("weight: got %d, want %d", w, tc.wantWeight)
}