Frontend flush-on-down policy; v0.9.3
Adds a per-frontend flush-on-down flag (default true) that causes maglevd to set is_flush=true on lb_as_set_weight when a backend transitions to StateDown, tearing down existing flows pinned to the dead AS instead of just draining them. Rise/fall debouncing in the health checker already absorbs single-probe flaps, so a fall-counted down is almost always a real outage — and during a real outage the client-visible "connection refused" oscillation window (where VPP keeps steering existing flows at a dead AS until retry) is a reliability regression worth closing by default. Operators who want the pre-flag drain-only behaviour can set flush-on-down: false per frontend. BackendEffectiveWeight's truth table grows one axis: StateDown now returns (0, flushOnDown); StateDisabled still unconditionally flushes; StateUnknown / StatePaused still never flush. The unit test pins all four combinations. The flag surfaces in the gRPC FrontendInfo message and in `maglevc show frontend <name>` right next to src-ip-sticky.
This commit is contained in:
2
Makefile
2
Makefile
@@ -15,7 +15,7 @@ FRONTEND_WEB_SRC := $(shell find cmd/frontend/web/src -type f 2>/dev/null) \
|
|||||||
FRONTEND_WEB_DIST := cmd/frontend/web/dist/index.html
|
FRONTEND_WEB_DIST := cmd/frontend/web/dist/index.html
|
||||||
|
|
||||||
NATIVE_ARCH := $(shell go env GOARCH)
|
NATIVE_ARCH := $(shell go env GOARCH)
|
||||||
VERSION := 0.9.2
|
VERSION := 0.9.3
|
||||||
COMMIT_HASH := $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
COMMIT_HASH := $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
|
||||||
DATE := $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
DATE := $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
|
||||||
LDFLAGS := -X '$(MODULE)/cmd.version=$(VERSION)' \
|
LDFLAGS := -X '$(MODULE)/cmd.version=$(VERSION)' \
|
||||||
|
|||||||
@@ -462,6 +462,7 @@ func runShowFrontend(ctx context.Context, client grpcapi.MaglevClient, args []st
|
|||||||
_, _ = fmt.Fprintf(w, "%s\t%s\n", label("protocol"), info.Protocol)
|
_, _ = fmt.Fprintf(w, "%s\t%s\n", label("protocol"), info.Protocol)
|
||||||
_, _ = fmt.Fprintf(w, "%s\t%d\n", label("port"), info.Port)
|
_, _ = fmt.Fprintf(w, "%s\t%d\n", label("port"), info.Port)
|
||||||
_, _ = fmt.Fprintf(w, "%s\t%t\n", label("src-ip-sticky"), info.SrcIpSticky)
|
_, _ = fmt.Fprintf(w, "%s\t%t\n", label("src-ip-sticky"), info.SrcIpSticky)
|
||||||
|
_, _ = fmt.Fprintf(w, "%s\t%t\n", label("flush-on-down"), info.FlushOnDown)
|
||||||
if info.Description != "" {
|
if info.Description != "" {
|
||||||
_, _ = fmt.Fprintf(w, "%s\t%s\n", label("description"), info.Description)
|
_, _ = fmt.Fprintf(w, "%s\t%s\n", label("description"), info.Description)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -121,6 +121,15 @@ type Frontend struct {
|
|||||||
Port uint16 // 0 means omitted (all ports)
|
Port uint16 // 0 means omitted (all ports)
|
||||||
Pools []Pool // ordered tiers; first pool with any up backend is active
|
Pools []Pool // ordered tiers; first pool with any up backend is active
|
||||||
SrcIPSticky bool // when true, VPP LB uses src-IP-based hashing for this VIP
|
SrcIPSticky bool // when true, VPP LB uses src-IP-based hashing for this VIP
|
||||||
|
// FlushOnDown: when true (default), a backend transition to
|
||||||
|
// StateDown causes maglevd to set is_flush=true on the VPP
|
||||||
|
// weight update so existing flows pinned to the dead AS are
|
||||||
|
// torn down immediately. With it false, down transitions only
|
||||||
|
// drain (weight=0, keep flows), matching the pre-flag
|
||||||
|
// behaviour. rise/fall debouncing already protects against
|
||||||
|
// single-probe flaps, so defaulting to flush=true is safe for
|
||||||
|
// the common case of a real outage.
|
||||||
|
FlushOnDown bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// ---- raw YAML types --------------------------------------------------------
|
// ---- raw YAML types --------------------------------------------------------
|
||||||
@@ -202,6 +211,7 @@ type rawFrontend struct {
|
|||||||
Port uint16 `yaml:"port"`
|
Port uint16 `yaml:"port"`
|
||||||
Pools []rawPool `yaml:"pools"`
|
Pools []rawPool `yaml:"pools"`
|
||||||
SrcIPSticky bool `yaml:"src-ip-sticky"`
|
SrcIPSticky bool `yaml:"src-ip-sticky"`
|
||||||
|
FlushOnDown *bool `yaml:"flush-on-down"` // nil → default true
|
||||||
}
|
}
|
||||||
|
|
||||||
// ---- Check / Load ----------------------------------------------------------
|
// ---- Check / Load ----------------------------------------------------------
|
||||||
@@ -538,6 +548,7 @@ func convertFrontend(name string, r *rawFrontend, backends map[string]Backend) (
|
|||||||
Protocol: r.Protocol,
|
Protocol: r.Protocol,
|
||||||
Port: r.Port,
|
Port: r.Port,
|
||||||
SrcIPSticky: r.SrcIPSticky,
|
SrcIPSticky: r.SrcIPSticky,
|
||||||
|
FlushOnDown: boolDefault(r.FlushOnDown, true),
|
||||||
}
|
}
|
||||||
|
|
||||||
ip := net.ParseIP(r.Address)
|
ip := net.ParseIP(r.Address)
|
||||||
|
|||||||
@@ -1556,6 +1556,7 @@ type FrontendInfo struct {
|
|||||||
Pools []*PoolInfo `protobuf:"bytes,5,rep,name=pools,proto3" json:"pools,omitempty"`
|
Pools []*PoolInfo `protobuf:"bytes,5,rep,name=pools,proto3" json:"pools,omitempty"`
|
||||||
Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
|
Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
|
||||||
SrcIpSticky bool `protobuf:"varint,7,opt,name=src_ip_sticky,json=srcIpSticky,proto3" json:"src_ip_sticky,omitempty"` // VPP LB uses src-IP-based stickiness for this VIP
|
SrcIpSticky bool `protobuf:"varint,7,opt,name=src_ip_sticky,json=srcIpSticky,proto3" json:"src_ip_sticky,omitempty"` // VPP LB uses src-IP-based stickiness for this VIP
|
||||||
|
FlushOnDown bool `protobuf:"varint,8,opt,name=flush_on_down,json=flushOnDown,proto3" json:"flush_on_down,omitempty"` // tear down existing flows when a backend goes down
|
||||||
unknownFields protoimpl.UnknownFields
|
unknownFields protoimpl.UnknownFields
|
||||||
sizeCache protoimpl.SizeCache
|
sizeCache protoimpl.SizeCache
|
||||||
}
|
}
|
||||||
@@ -1639,6 +1640,13 @@ func (x *FrontendInfo) GetSrcIpSticky() bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (x *FrontendInfo) GetFlushOnDown() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.FlushOnDown
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
type ListBackendsResponse struct {
|
type ListBackendsResponse struct {
|
||||||
state protoimpl.MessageState `protogen:"open.v1"`
|
state protoimpl.MessageState `protogen:"open.v1"`
|
||||||
BackendNames []string `protobuf:"bytes,1,rep,name=backend_names,json=backendNames,proto3" json:"backend_names,omitempty"`
|
BackendNames []string `protobuf:"bytes,1,rep,name=backend_names,json=backendNames,proto3" json:"backend_names,omitempty"`
|
||||||
@@ -2596,7 +2604,7 @@ const file_proto_maglev_proto_rawDesc = "" +
|
|||||||
"\x10effective_weight\x18\x03 \x01(\x05R\x0feffectiveWeight\"S\n" +
|
"\x10effective_weight\x18\x03 \x01(\x05R\x0feffectiveWeight\"S\n" +
|
||||||
"\bPoolInfo\x12\x12\n" +
|
"\bPoolInfo\x12\x12\n" +
|
||||||
"\x04name\x18\x01 \x01(\tR\x04name\x123\n" +
|
"\x04name\x18\x01 \x01(\tR\x04name\x123\n" +
|
||||||
"\bbackends\x18\x02 \x03(\v2\x17.maglev.PoolBackendInfoR\bbackends\"\xda\x01\n" +
|
"\bbackends\x18\x02 \x03(\v2\x17.maglev.PoolBackendInfoR\bbackends\"\xfe\x01\n" +
|
||||||
"\fFrontendInfo\x12\x12\n" +
|
"\fFrontendInfo\x12\x12\n" +
|
||||||
"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
|
"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
|
||||||
"\aaddress\x18\x02 \x01(\tR\aaddress\x12\x1a\n" +
|
"\aaddress\x18\x02 \x01(\tR\aaddress\x12\x1a\n" +
|
||||||
@@ -2604,7 +2612,8 @@ const file_proto_maglev_proto_rawDesc = "" +
|
|||||||
"\x04port\x18\x04 \x01(\rR\x04port\x12&\n" +
|
"\x04port\x18\x04 \x01(\rR\x04port\x12&\n" +
|
||||||
"\x05pools\x18\x05 \x03(\v2\x10.maglev.PoolInfoR\x05pools\x12 \n" +
|
"\x05pools\x18\x05 \x03(\v2\x10.maglev.PoolInfoR\x05pools\x12 \n" +
|
||||||
"\vdescription\x18\x06 \x01(\tR\vdescription\x12\"\n" +
|
"\vdescription\x18\x06 \x01(\tR\vdescription\x12\"\n" +
|
||||||
"\rsrc_ip_sticky\x18\a \x01(\bR\vsrcIpSticky\";\n" +
|
"\rsrc_ip_sticky\x18\a \x01(\bR\vsrcIpSticky\x12\"\n" +
|
||||||
|
"\rflush_on_down\x18\b \x01(\bR\vflushOnDown\";\n" +
|
||||||
"\x14ListBackendsResponse\x12#\n" +
|
"\x14ListBackendsResponse\x12#\n" +
|
||||||
"\rbackend_names\x18\x01 \x03(\tR\fbackendNames\"0\n" +
|
"\rbackend_names\x18\x01 \x03(\tR\fbackendNames\"0\n" +
|
||||||
"\x18ListHealthChecksResponse\x12\x14\n" +
|
"\x18ListHealthChecksResponse\x12\x14\n" +
|
||||||
|
|||||||
@@ -509,6 +509,7 @@ func frontendToProto(name string, fe config.Frontend, src vpp.StateSource) *Fron
|
|||||||
Description: fe.Description,
|
Description: fe.Description,
|
||||||
Pools: pools,
|
Pools: pools,
|
||||||
SrcIpSticky: fe.SrcIPSticky,
|
SrcIpSticky: fe.SrcIPSticky,
|
||||||
|
FlushOnDown: fe.FlushOnDown,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -27,27 +27,39 @@ func ActivePoolIndex(fe config.Frontend, states map[string]State) int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// BackendEffectiveWeight is the pure mapping from (pool index, active pool,
|
// BackendEffectiveWeight is the pure mapping from (pool index, active pool,
|
||||||
// backend state, config weight) to the desired VPP AS weight and flush hint.
|
// backend state, config weight, flush-on-down policy) to the desired VPP AS
|
||||||
// This is the single source of truth for the state → dataplane rule.
|
// weight and flush hint. This is the single source of truth for the state
|
||||||
|
// → dataplane rule.
|
||||||
//
|
//
|
||||||
// A backend gets its configured weight iff it is up AND belongs to the
|
// A backend gets its configured weight iff it is up AND belongs to the
|
||||||
// currently-active pool. Every other case yields weight 0. Only StateDisabled
|
// currently-active pool. Every other case yields weight 0.
|
||||||
// produces flush=true (immediate session teardown).
|
//
|
||||||
|
// The flush hint controls whether VPP tears down existing flows pinned to
|
||||||
|
// the AS on the weight update (is_flush=true on lb_as_set_weight) or merely
|
||||||
|
// stops accepting new flows (drain, keep existing). StateDisabled always
|
||||||
|
// flushes — it's an operator-driven "this AS is going away" signal. StateDown
|
||||||
|
// flushes iff the frontend has flush-on-down enabled; the default is true,
|
||||||
|
// because rise/fall debouncing in the health checker already absorbs flaps
|
||||||
|
// and a fall-counted down is almost always a real outage the operator wants
|
||||||
|
// cleared from the session table fast. Unknown / paused never flush —
|
||||||
|
// unknown is pre-probe, and paused is an explicit drain-don't-kill signal.
|
||||||
//
|
//
|
||||||
// state in active pool not in active pool flush
|
// state in active pool not in active pool flush
|
||||||
// -------- -------------- ------------------- -----
|
// -------- -------------- ------------------- ----------------
|
||||||
// unknown 0 0 no
|
// unknown 0 0 no
|
||||||
// up configured 0 (standby) no
|
// up configured 0 (standby) no
|
||||||
// down 0 0 no
|
// down 0 0 flushOnDown
|
||||||
// paused 0 0 no
|
// paused 0 0 no
|
||||||
// disabled 0 0 yes
|
// disabled 0 0 yes
|
||||||
func BackendEffectiveWeight(poolIdx, activePool int, state State, cfgWeight int) (weight uint8, flush bool) {
|
func BackendEffectiveWeight(poolIdx, activePool int, state State, cfgWeight int, flushOnDown bool) (weight uint8, flush bool) {
|
||||||
switch state {
|
switch state {
|
||||||
case StateUp:
|
case StateUp:
|
||||||
if poolIdx == activePool {
|
if poolIdx == activePool {
|
||||||
return clampWeight(cfgWeight), false
|
return clampWeight(cfgWeight), false
|
||||||
}
|
}
|
||||||
return 0, false
|
return 0, false
|
||||||
|
case StateDown:
|
||||||
|
return 0, flushOnDown
|
||||||
case StateDisabled:
|
case StateDisabled:
|
||||||
return 0, true
|
return 0, true
|
||||||
default:
|
default:
|
||||||
@@ -63,7 +75,7 @@ func EffectiveWeights(fe config.Frontend, states map[string]State) map[int]map[s
|
|||||||
for poolIdx, pool := range fe.Pools {
|
for poolIdx, pool := range fe.Pools {
|
||||||
out[poolIdx] = make(map[string]uint8, len(pool.Backends))
|
out[poolIdx] = make(map[string]uint8, len(pool.Backends))
|
||||||
for bName, pb := range pool.Backends {
|
for bName, pb := range pool.Backends {
|
||||||
w, _ := BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight)
|
w, _ := BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight, fe.FlushOnDown)
|
||||||
out[poolIdx][bName] = w
|
out[poolIdx][bName] = w
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,41 +14,48 @@ import (
|
|||||||
// the behavior has deliberately changed.
|
// the behavior has deliberately changed.
|
||||||
func TestBackendEffectiveWeight(t *testing.T) {
|
func TestBackendEffectiveWeight(t *testing.T) {
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
poolIdx int
|
poolIdx int
|
||||||
activePool int
|
activePool int
|
||||||
state State
|
state State
|
||||||
cfgWeight int
|
cfgWeight int
|
||||||
wantWeight uint8
|
flushOnDown bool
|
||||||
wantFlush bool
|
wantWeight uint8
|
||||||
|
wantFlush bool
|
||||||
}{
|
}{
|
||||||
{"up active w100", 0, 0, StateUp, 100, 100, false},
|
{"up active w100", 0, 0, StateUp, 100, true, 100, false},
|
||||||
{"up active w50", 0, 0, StateUp, 50, 50, false},
|
{"up active w50", 0, 0, StateUp, 50, true, 50, false},
|
||||||
{"up active w0", 0, 0, StateUp, 0, 0, false},
|
{"up active w0", 0, 0, StateUp, 0, true, 0, false},
|
||||||
{"up active clamp-high", 0, 0, StateUp, 150, 100, false},
|
{"up active clamp-high", 0, 0, StateUp, 150, true, 100, false},
|
||||||
{"up active clamp-low", 0, 0, StateUp, -5, 0, false},
|
{"up active clamp-low", 0, 0, StateUp, -5, true, 0, false},
|
||||||
|
|
||||||
{"up standby pool0 active=1", 0, 1, StateUp, 100, 0, false},
|
{"up standby pool0 active=1", 0, 1, StateUp, 100, true, 0, false},
|
||||||
{"up standby pool1 active=0", 1, 0, StateUp, 100, 0, false},
|
{"up standby pool1 active=0", 1, 0, StateUp, 100, true, 0, false},
|
||||||
{"up standby pool2 active=0", 2, 0, StateUp, 100, 0, false},
|
{"up standby pool2 active=0", 2, 0, StateUp, 100, true, 0, false},
|
||||||
|
|
||||||
{"up failover pool1 active=1", 1, 1, StateUp, 100, 100, false},
|
{"up failover pool1 active=1", 1, 1, StateUp, 100, true, 100, false},
|
||||||
|
|
||||||
{"unknown pool0 active=0", 0, 0, StateUnknown, 100, 0, false},
|
{"unknown pool0 active=0", 0, 0, StateUnknown, 100, true, 0, false},
|
||||||
{"unknown pool1 active=0", 1, 0, StateUnknown, 100, 0, false},
|
{"unknown pool1 active=0", 1, 0, StateUnknown, 100, true, 0, false},
|
||||||
|
|
||||||
{"down pool0 active=0", 0, 0, StateDown, 100, 0, false},
|
// flush-on-down policy: default true flushes, explicit false drains.
|
||||||
{"down pool1 active=1", 1, 1, StateDown, 100, 0, false},
|
{"down pool0 flushOnDown=true", 0, 0, StateDown, 100, true, 0, true},
|
||||||
|
{"down pool1 flushOnDown=true", 1, 1, StateDown, 100, true, 0, true},
|
||||||
|
{"down pool0 flushOnDown=false", 0, 0, StateDown, 100, false, 0, false},
|
||||||
|
{"down pool1 flushOnDown=false", 1, 1, StateDown, 100, false, 0, false},
|
||||||
|
|
||||||
{"paused pool0 active=0", 0, 0, StatePaused, 100, 0, false},
|
// paused never flushes — drain-don't-kill is the whole point.
|
||||||
|
{"paused pool0 flushOnDown=true", 0, 0, StatePaused, 100, true, 0, false},
|
||||||
|
{"paused pool0 flushOnDown=false", 0, 0, StatePaused, 100, false, 0, false},
|
||||||
|
|
||||||
{"disabled pool0 active=0", 0, 0, StateDisabled, 100, 0, true},
|
// disabled always flushes regardless of flush-on-down policy.
|
||||||
{"disabled pool1 active=1", 1, 1, StateDisabled, 100, 0, true},
|
{"disabled pool0 flushOnDown=true", 0, 0, StateDisabled, 100, true, 0, true},
|
||||||
|
{"disabled pool1 flushOnDown=false", 1, 1, StateDisabled, 100, false, 0, true},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range cases {
|
for _, tc := range cases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight)
|
w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight, tc.flushOnDown)
|
||||||
if w != tc.wantWeight {
|
if w != tc.wantWeight {
|
||||||
t.Errorf("weight: got %d, want %d", w, tc.wantWeight)
|
t.Errorf("weight: got %d, want %d", w, tc.wantWeight)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -455,7 +455,7 @@ func desiredFromFrontend(cfg *config.Config, fe config.Frontend, src StateSource
|
|||||||
if _, already := d.ASes[addr]; already {
|
if _, already := d.ASes[addr]; already {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
w, flush := health.BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight)
|
w, flush := health.BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight, fe.FlushOnDown)
|
||||||
d.ASes[addr] = desiredAS{
|
d.ASes[addr] = desiredAS{
|
||||||
Address: b.Address,
|
Address: b.Address,
|
||||||
Weight: w,
|
Weight: w,
|
||||||
|
|||||||
@@ -212,6 +212,7 @@ message FrontendInfo {
|
|||||||
repeated PoolInfo pools = 5;
|
repeated PoolInfo pools = 5;
|
||||||
string description = 6;
|
string description = 6;
|
||||||
bool src_ip_sticky = 7; // VPP LB uses src-IP-based stickiness for this VIP
|
bool src_ip_sticky = 7; // VPP LB uses src-IP-based stickiness for this VIP
|
||||||
|
bool flush_on_down = 8; // tear down existing flows when a backend goes down
|
||||||
}
|
}
|
||||||
|
|
||||||
message ListBackendsResponse {
|
message ListBackendsResponse {
|
||||||
|
|||||||
Reference in New Issue
Block a user