Adds a per-frontend flush-on-down flag (default true) that causes maglevd to set is_flush=true on lb_as_set_weight when a backend transitions to StateDown, tearing down existing flows pinned to the dead AS instead of merely draining them. Rise/fall debouncing in the health checker already absorbs single-probe flaps, so a fall-counted down is almost always a real outage. During a real outage, the client-visible "connection refused" oscillation window (where VPP keeps steering existing flows at the dead AS until the client retries) is a reliability regression worth closing by default. Operators who want the pre-flag drain-only behaviour can set flush-on-down: false per frontend. BackendEffectiveWeight's truth table grows one axis: StateDown now returns (0, flushOnDown); StateDisabled still unconditionally flushes; StateUnknown and StatePaused still never flush. The unit test pins all four combinations. The flag surfaces in the gRPC FrontendInfo message and in `maglevc show frontend <name>`, right next to src-ip-sticky.
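A minimal sketch of the decision this describes, assuming the signature exercised by the truth-table test below (hypothetical name and body, reconstructed from the pinned cases; not the shipped maglevd implementation):

// backendEffectiveWeightSketch maps (poolIdx, activePool, state, cfgWeight,
// flushOnDown) to the (weight, is_flush) pair programmed into VPP.
func backendEffectiveWeightSketch(poolIdx, activePool int, state State, cfgWeight int, flushOnDown bool) (uint8, bool) {
	switch state {
	case StateUp:
		if poolIdx != activePool {
			return 0, false // standby pool: no traffic, no flush
		}
		if cfgWeight < 0 { // clamp configured weight into [0, 100]
			cfgWeight = 0
		} else if cfgWeight > 100 {
			cfgWeight = 100
		}
		return uint8(cfgWeight), false
	case StateDown:
		return 0, flushOnDown // the new policy axis: flush (default) or drain
	case StateDisabled:
		return 0, true // operator removed it: always tear down flows
	default: // StateUnknown, StatePaused
		return 0, false // drain only, never flush
	}
}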
// Copyright (c) 2026, Pim van Pelt <pim@ipng.ch>

package health

import (
	"testing"

	"git.ipng.ch/ipng/vpp-maglev/internal/config"
)

// TestBackendEffectiveWeight locks down the state → (weight, flush) truth
// table. This is the single source of truth for how maglevd decides what
// to program into VPP for each backend state. If this test needs updating,
// the behavior has deliberately changed.
func TestBackendEffectiveWeight(t *testing.T) {
	cases := []struct {
		name        string
		poolIdx     int
		activePool  int
		state       State
		cfgWeight   int
		flushOnDown bool
		wantWeight  uint8
		wantFlush   bool
	}{
		{"up active w100", 0, 0, StateUp, 100, true, 100, false},
		{"up active w50", 0, 0, StateUp, 50, true, 50, false},
		{"up active w0", 0, 0, StateUp, 0, true, 0, false},
		{"up active clamp-high", 0, 0, StateUp, 150, true, 100, false},
		{"up active clamp-low", 0, 0, StateUp, -5, true, 0, false},

		{"up standby pool0 active=1", 0, 1, StateUp, 100, true, 0, false},
		{"up standby pool1 active=0", 1, 0, StateUp, 100, true, 0, false},
		{"up standby pool2 active=0", 2, 0, StateUp, 100, true, 0, false},

		{"up failover pool1 active=1", 1, 1, StateUp, 100, true, 100, false},

		{"unknown pool0 active=0", 0, 0, StateUnknown, 100, true, 0, false},
		{"unknown pool1 active=0", 1, 0, StateUnknown, 100, true, 0, false},

		// flush-on-down policy: default true flushes, explicit false drains.
		{"down pool0 flushOnDown=true", 0, 0, StateDown, 100, true, 0, true},
		{"down pool1 flushOnDown=true", 1, 1, StateDown, 100, true, 0, true},
		{"down pool0 flushOnDown=false", 0, 0, StateDown, 100, false, 0, false},
		{"down pool1 flushOnDown=false", 1, 1, StateDown, 100, false, 0, false},

		// paused never flushes — drain-don't-kill is the whole point.
		{"paused pool0 flushOnDown=true", 0, 0, StatePaused, 100, true, 0, false},
		{"paused pool0 flushOnDown=false", 0, 0, StatePaused, 100, false, 0, false},

		// disabled always flushes regardless of flush-on-down policy.
		{"disabled pool0 flushOnDown=true", 0, 0, StateDisabled, 100, true, 0, true},
		{"disabled pool1 flushOnDown=false", 1, 1, StateDisabled, 100, false, 0, true},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight, tc.flushOnDown)
			if w != tc.wantWeight {
				t.Errorf("weight: got %d, want %d", w, tc.wantWeight)
			}
			if f != tc.wantFlush {
				t.Errorf("flush: got %v, want %v", f, tc.wantFlush)
			}
		})
	}
}

// TestActivePoolIndex locks down the priority-failover selector: the first
// pool containing at least one up backend is the active pool; defaults to
// pool 0 when nothing is up. (TestActivePoolIndexWeightedFailover below
// refines "up" to also require a non-zero configured weight.)
func TestActivePoolIndex(t *testing.T) {
	mkFE := func(pools ...[]string) config.Frontend {
		out := make([]config.Pool, len(pools))
		for i, p := range pools {
			out[i] = config.Pool{Name: "p", Backends: map[string]config.PoolBackend{}}
			for _, name := range p {
				out[i].Backends[name] = config.PoolBackend{Weight: 100}
			}
		}
		return config.Frontend{Pools: out}
	}

	cases := []struct {
		name   string
		fe     config.Frontend
		states map[string]State
		want   int
	}{
		{
			name:   "pool0 has up, pool1 standby",
			fe:     mkFE([]string{"a", "b"}, []string{"c", "d"}),
			states: map[string]State{"a": StateUp, "b": StateDown, "c": StateUp, "d": StateUp},
			want:   0,
		},
		{
			name:   "pool0 all down, pool1 has up → failover",
			fe:     mkFE([]string{"a", "b"}, []string{"c", "d"}),
			states: map[string]State{"a": StateDown, "b": StateDown, "c": StateUp, "d": StateUp},
			want:   1,
		},
		{
			name:   "pool0 all disabled, pool1 has up → failover",
			fe:     mkFE([]string{"a", "b"}, []string{"c"}),
			states: map[string]State{"a": StateDisabled, "b": StateDisabled, "c": StateUp},
			want:   1,
		},
		{
			name:   "pool0 all paused, pool1 has up → failover",
			fe:     mkFE([]string{"a"}, []string{"c"}),
			states: map[string]State{"a": StatePaused, "c": StateUp},
			want:   1,
		},
		{
			name:   "pool0 all unknown (startup), pool1 up → pool1",
			fe:     mkFE([]string{"a"}, []string{"c"}),
			states: map[string]State{"a": StateUnknown, "c": StateUp},
			want:   1,
		},
		{
			name:   "nothing up anywhere → default 0",
			fe:     mkFE([]string{"a"}, []string{"c"}),
			states: map[string]State{"a": StateDown, "c": StateDown},
			want:   0,
		},
		{
			name:   "1 up in pool0 is enough",
			fe:     mkFE([]string{"a", "b", "c"}, []string{"d"}),
			states: map[string]State{"a": StateDown, "b": StateDown, "c": StateUp, "d": StateUp},
			want:   0,
		},
		{
			name:   "three tiers, pool0 and pool1 both empty → pool2",
			fe:     mkFE([]string{"a"}, []string{"b"}, []string{"c"}),
			states: map[string]State{"a": StateDown, "b": StateDown, "c": StateUp},
			want:   2,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := ActivePoolIndex(tc.fe, tc.states)
			if got != tc.want {
				t.Errorf("got pool %d, want pool %d", got, tc.want)
			}
		})
	}
}

// TestActivePoolIndexWeightedFailover pins the rule that a pool is only
// "active" when it has at least one backend that is both up AND has a
// non-zero configured weight. A pool whose up backends are all
// weight=0 contributes no serving capacity, so failover should fall
// through to the next tier.
//
// This was a latent bug: ActivePoolIndex used to check state alone and
// would return poolIdx=0 even when every primary backend had weight=0,
// leaving the fallback pool unused even though it was the only pool
// that could actually serve traffic.
func TestActivePoolIndexWeightedFailover(t *testing.T) {
	mkFE := func(pools ...map[string]int) config.Frontend {
		out := make([]config.Pool, len(pools))
		for i, p := range pools {
			out[i] = config.Pool{Name: "p", Backends: map[string]config.PoolBackend{}}
			for name, w := range p {
				out[i].Backends[name] = config.PoolBackend{Weight: w}
			}
		}
		return config.Frontend{Pools: out}
	}

	cases := []struct {
		name   string
		fe     config.Frontend
		states map[string]State
		want   int
	}{
		{
			name: "primary has only weight-0 backends → failover to secondary",
			fe: mkFE(
				map[string]int{"a": 0, "b": 0},
				map[string]int{"c": 100},
			),
			states: map[string]State{"a": StateUp, "b": StateUp, "c": StateUp},
			want:   1,
		},
		{
			name: "primary has a weight-0 AND a weight>0 backend → primary stays active",
			fe: mkFE(
				map[string]int{"a": 0, "b": 50},
				map[string]int{"c": 100},
			),
			states: map[string]State{"a": StateUp, "b": StateUp, "c": StateUp},
			want:   0,
		},
		{
			name: "primary w>0 backend is down, w=0 sibling is up → failover",
			fe: mkFE(
				map[string]int{"a": 0, "b": 50},
				map[string]int{"c": 100},
			),
			states: map[string]State{"a": StateUp, "b": StateDown, "c": StateUp},
			want:   1,
		},
		{
			name: "two tiers of weight-0 → fall through to third tier",
			fe: mkFE(
				map[string]int{"a": 0},
				map[string]int{"b": 0},
				map[string]int{"c": 100},
			),
			states: map[string]State{"a": StateUp, "b": StateUp, "c": StateUp},
			want:   2,
		},
		{
			name: "every tier weight-0 → default 0 (nothing can serve)",
			fe: mkFE(
				map[string]int{"a": 0},
				map[string]int{"b": 0},
			),
			states: map[string]State{"a": StateUp, "b": StateUp},
			want:   0,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := ActivePoolIndex(tc.fe, tc.states)
			if got != tc.want {
				t.Errorf("got pool %d, want pool %d", got, tc.want)
			}
		})
	}
}
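
// A minimal sketch of the selector the two ActivePoolIndex tests above pin,
// assuming the weight-aware rule (hypothetical name and body, not the shipped
// implementation): walk the pools in priority order and pick the first one
// with a backend that is StateUp and carries a non-zero configured weight.
func activePoolIndexSketch(fe config.Frontend, states map[string]State) int {
	for i, pool := range fe.Pools {
		for name, be := range pool.Backends {
			if states[name] == StateUp && be.Weight > 0 {
				return i // first tier with real serving capacity wins
			}
		}
	}
	return 0 // nothing can serve anywhere: fall back to the primary pool
}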

// TestComputeFrontendState locks down the reduction rule: frontends are
// up iff any backend has effective weight > 0, unknown iff all backends
// are still in StateUnknown (or there are no backends), and down otherwise.
func TestComputeFrontendState(t *testing.T) {
	mkFE := func(pools ...[]string) config.Frontend {
		out := make([]config.Pool, len(pools))
		for i, p := range pools {
			out[i] = config.Pool{Name: "p", Backends: map[string]config.PoolBackend{}}
			for _, name := range p {
				out[i].Backends[name] = config.PoolBackend{Weight: 100}
			}
		}
		return config.Frontend{Pools: out}
	}

	cases := []struct {
		name   string
		fe     config.Frontend
		states map[string]State
		want   FrontendState
	}{
		{
			name: "no backends → unknown",
			fe:   config.Frontend{Pools: []config.Pool{{Name: "primary", Backends: map[string]config.PoolBackend{}}}},
			want: FrontendStateUnknown,
		},
		{
			name:   "all unknown (startup) → unknown",
			fe:     mkFE([]string{"a", "b"}),
			states: map[string]State{"a": StateUnknown, "b": StateUnknown},
			want:   FrontendStateUnknown,
		},
		{
			name:   "one up in primary → up",
			fe:     mkFE([]string{"a", "b"}),
			states: map[string]State{"a": StateUp, "b": StateDown},
			want:   FrontendStateUp,
		},
		{
			name:   "all down → down",
			fe:     mkFE([]string{"a", "b"}),
			states: map[string]State{"a": StateDown, "b": StateDown},
			want:   FrontendStateDown,
		},
		{
			name:   "all disabled → down",
			fe:     mkFE([]string{"a", "b"}),
			states: map[string]State{"a": StateDisabled, "b": StateDisabled},
			want:   FrontendStateDown,
		},
		{
			name:   "all paused → down",
			fe:     mkFE([]string{"a"}),
			states: map[string]State{"a": StatePaused},
			want:   FrontendStateDown,
		},
		{
			name:   "primary down, secondary up → up (failover)",
			fe:     mkFE([]string{"a"}, []string{"b"}),
			states: map[string]State{"a": StateDown, "b": StateUp},
			want:   FrontendStateUp,
		},
		{
			name:   "primary up, secondary down → up (secondary standby ignored)",
			fe:     mkFE([]string{"a"}, []string{"b"}),
			states: map[string]State{"a": StateUp, "b": StateDown},
			want:   FrontendStateUp,
		},
		{
			name:   "primary unknown, secondary unknown → unknown",
			fe:     mkFE([]string{"a"}, []string{"b"}),
			states: map[string]State{"a": StateUnknown, "b": StateUnknown},
			want:   FrontendStateUnknown,
		},
		{
			name:   "primary down, secondary unknown → down",
			fe:     mkFE([]string{"a"}, []string{"b"}),
			states: map[string]State{"a": StateDown, "b": StateUnknown},
			want:   FrontendStateDown,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := ComputeFrontendState(tc.fe, tc.states)
			if got != tc.want {
				t.Errorf("got %s, want %s", got, tc.want)
			}
		})
	}
}
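
// A minimal sketch of the reduction TestComputeFrontendState pins (hypothetical
// name and body, not the shipped implementation). Because ActivePoolIndex
// always selects a pool containing an up, weight>0 backend when one exists,
// "some backend has effective weight > 0" reduces to "some backend is StateUp
// with a non-zero configured weight". Backends missing from the states map are
// assumed here to count as StateUnknown.
func computeFrontendStateSketch(fe config.Frontend, states map[string]State) FrontendState {
	sawBackend, sawResolved := false, false
	for _, pool := range fe.Pools {
		for name, be := range pool.Backends {
			sawBackend = true
			st, ok := states[name]
			if !ok {
				st = StateUnknown // unprobed backend: treat as unknown
			}
			if st != StateUnknown {
				sawResolved = true
			}
			if st == StateUp && be.Weight > 0 {
				return FrontendStateUp // something can carry traffic
			}
		}
	}
	if !sawBackend || !sawResolved {
		return FrontendStateUnknown // nothing to probe, or nothing probed yet
	}
	return FrontendStateDown
}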