diff --git a/Makefile b/Makefile
index 78a3edc..9d8f1ef 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,7 @@ FRONTEND_WEB_SRC := $(shell find cmd/frontend/web/src -type f 2>/dev/null) \
 FRONTEND_WEB_DIST := cmd/frontend/web/dist/index.html
 NATIVE_ARCH := $(shell go env GOARCH)
 
-VERSION := 0.9.2
+VERSION := 0.9.3
 COMMIT_HASH := $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
 DATE := $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
 LDFLAGS := -X '$(MODULE)/cmd.version=$(VERSION)' \
diff --git a/cmd/maglevc/commands.go b/cmd/maglevc/commands.go
index 2c890c5..14b53e7 100644
--- a/cmd/maglevc/commands.go
+++ b/cmd/maglevc/commands.go
@@ -462,6 +462,7 @@ func runShowFrontend(ctx context.Context, client grpcapi.MaglevClient, args []st
     _, _ = fmt.Fprintf(w, "%s\t%s\n", label("protocol"), info.Protocol)
     _, _ = fmt.Fprintf(w, "%s\t%d\n", label("port"), info.Port)
     _, _ = fmt.Fprintf(w, "%s\t%t\n", label("src-ip-sticky"), info.SrcIpSticky)
+    _, _ = fmt.Fprintf(w, "%s\t%t\n", label("flush-on-down"), info.FlushOnDown)
     if info.Description != "" {
         _, _ = fmt.Fprintf(w, "%s\t%s\n", label("description"), info.Description)
     }
diff --git a/internal/config/config.go b/internal/config/config.go
index 98fcfc3..52cdfb6 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -121,6 +121,15 @@ type Frontend struct {
     Port        uint16 // 0 means omitted (all ports)
     Pools       []Pool // ordered tiers; first pool with any up backend is active
     SrcIPSticky bool   // when true, VPP LB uses src-IP-based hashing for this VIP
+    // FlushOnDown: when true (the default), a backend transition to
+    // StateDown causes maglevd to set is_flush=true on the VPP
+    // weight update, so existing flows pinned to the dead AS are
+    // torn down immediately. When false, down transitions only
+    // drain (weight=0, existing flows kept), matching the pre-flag
+    // behaviour. Rise/fall debouncing already protects against
+    // single-probe flaps, so defaulting to flush=true is safe for
+    // the common case of a real outage.
+    FlushOnDown bool
 }
 
 // ---- raw YAML types --------------------------------------------------------
@@ -202,6 +211,7 @@ type rawFrontend struct {
     Port        uint16    `yaml:"port"`
     Pools       []rawPool `yaml:"pools"`
     SrcIPSticky bool      `yaml:"src-ip-sticky"`
+    FlushOnDown *bool     `yaml:"flush-on-down"` // nil → default true
 }
 
 // ---- Check / Load ----------------------------------------------------------
@@ -538,6 +548,7 @@ func convertFrontend(name string, r *rawFrontend, backends map[string]Backend) (
         Protocol:    r.Protocol,
         Port:        r.Port,
         SrcIPSticky: r.SrcIPSticky,
+        FlushOnDown: boolDefault(r.FlushOnDown, true),
     }
 
     ip := net.ParseIP(r.Address)
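Reviewer note: convertFrontend above leans on a boolDefault helper that this diff does not touch. If it is not already present in internal/config, a minimal sketch would look like the following (the name comes from the call site; the body is an assumption):

    package config

    // boolDefault resolves an optional YAML bool: a nil pointer means the
    // key was omitted, so def applies; otherwise the explicit value wins.
    // This is what lets rawFrontend distinguish "flush-on-down: false"
    // from the key being absent entirely.
    func boolDefault(p *bool, def bool) bool {
        if p == nil {
            return def
        }
        return *p
    }

In YAML terms: omitting flush-on-down leaves the new default of true in effect, while an explicit "flush-on-down: false" keeps the old drain-only behaviour for that frontend.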
diff --git a/internal/grpcapi/maglev.pb.go b/internal/grpcapi/maglev.pb.go
index aea851e..0cd27fc 100644
--- a/internal/grpcapi/maglev.pb.go
+++ b/internal/grpcapi/maglev.pb.go
@@ -1556,6 +1556,7 @@ type FrontendInfo struct {
     Pools         []*PoolInfo `protobuf:"bytes,5,rep,name=pools,proto3" json:"pools,omitempty"`
     Description   string      `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
     SrcIpSticky   bool        `protobuf:"varint,7,opt,name=src_ip_sticky,json=srcIpSticky,proto3" json:"src_ip_sticky,omitempty"` // VPP LB uses src-IP-based stickiness for this VIP
+    FlushOnDown   bool        `protobuf:"varint,8,opt,name=flush_on_down,json=flushOnDown,proto3" json:"flush_on_down,omitempty"` // tear down existing flows when a backend goes down
     unknownFields protoimpl.UnknownFields
     sizeCache     protoimpl.SizeCache
 }
@@ -1639,6 +1640,13 @@ func (x *FrontendInfo) GetSrcIpSticky() bool {
     return false
 }
 
+func (x *FrontendInfo) GetFlushOnDown() bool {
+    if x != nil {
+        return x.FlushOnDown
+    }
+    return false
+}
+
 type ListBackendsResponse struct {
     state         protoimpl.MessageState `protogen:"open.v1"`
     BackendNames  []string               `protobuf:"bytes,1,rep,name=backend_names,json=backendNames,proto3" json:"backend_names,omitempty"`
@@ -2596,7 +2604,7 @@ const file_proto_maglev_proto_rawDesc = "" +
     "\x10effective_weight\x18\x03 \x01(\x05R\x0feffectiveWeight\"S\n" +
     "\bPoolInfo\x12\x12\n" +
     "\x04name\x18\x01 \x01(\tR\x04name\x123\n" +
-    "\bbackends\x18\x02 \x03(\v2\x17.maglev.PoolBackendInfoR\bbackends\"\xda\x01\n" +
+    "\bbackends\x18\x02 \x03(\v2\x17.maglev.PoolBackendInfoR\bbackends\"\xfe\x01\n" +
     "\fFrontendInfo\x12\x12\n" +
     "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
     "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x1a\n" +
@@ -2604,7 +2612,8 @@ const file_proto_maglev_proto_rawDesc = "" +
     "\x04port\x18\x04 \x01(\rR\x04port\x12&\n" +
     "\x05pools\x18\x05 \x03(\v2\x10.maglev.PoolInfoR\x05pools\x12 \n" +
     "\vdescription\x18\x06 \x01(\tR\vdescription\x12\"\n" +
-    "\rsrc_ip_sticky\x18\a \x01(\bR\vsrcIpSticky\";\n" +
+    "\rsrc_ip_sticky\x18\a \x01(\bR\vsrcIpSticky\x12\"\n" +
+    "\rflush_on_down\x18\b \x01(\bR\vflushOnDown\";\n" +
     "\x14ListBackendsResponse\x12#\n" +
     "\rbackend_names\x18\x01 \x03(\tR\fbackendNames\"0\n" +
     "\x18ListHealthChecksResponse\x12\x14\n" +
diff --git a/internal/grpcapi/server.go b/internal/grpcapi/server.go
index 168e631..9c7360a 100644
--- a/internal/grpcapi/server.go
+++ b/internal/grpcapi/server.go
@@ -509,6 +509,7 @@ func frontendToProto(name string, fe config.Frontend, src vpp.StateSource) *Fron
         Description: fe.Description,
         Pools:       pools,
         SrcIpSticky: fe.SrcIPSticky,
+        FlushOnDown: fe.FlushOnDown,
     }
 }
 
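Reviewer note: the generated accessor is nil-safe, so clients can branch on the new field without guarding the message pointer. A hypothetical sketch, for illustration only (GetFlushOnDown and FlushOnDown come from this diff; everything else, including the import path, is assumed):

    package main

    import (
        "fmt"

        grpcapi "example.invalid/maglev/internal/grpcapi" // import path assumed
    )

    // describeFlushPolicy renders a frontend's down-transition policy.
    // info may be nil; the generated getter then simply returns false.
    func describeFlushPolicy(info *grpcapi.FrontendInfo) string {
        if info.GetFlushOnDown() {
            return "down backends: flush (existing sessions torn down)"
        }
        return "down backends: drain (existing sessions kept)"
    }

    func main() {
        fmt.Println(describeFlushPolicy(&grpcapi.FrontendInfo{FlushOnDown: true}))
    }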
diff --git a/internal/health/weights.go b/internal/health/weights.go
index e94fbf0..1c80844 100644
--- a/internal/health/weights.go
+++ b/internal/health/weights.go
@@ -27,27 +27,39 @@ func ActivePoolIndex(fe config.Frontend, states map[string]State) int {
 }
 
 // BackendEffectiveWeight is the pure mapping from (pool index, active pool,
-// backend state, config weight) to the desired VPP AS weight and flush hint.
-// This is the single source of truth for the state → dataplane rule.
+// backend state, config weight, flush-on-down policy) to the desired VPP AS
+// weight and flush hint. This is the single source of truth for the state
+// → dataplane rule.
 //
 // A backend gets its configured weight iff it is up AND belongs to the
-// currently-active pool. Every other case yields weight 0. Only StateDisabled
-// produces flush=true (immediate session teardown).
+// currently-active pool. Every other case yields weight 0.
+//
+// The flush hint controls whether VPP tears down existing flows pinned to
+// the AS on the weight update (is_flush=true on lb_as_set_weight) or merely
+// stops accepting new flows (drain, keep existing). StateDisabled always
+// flushes — it's an operator-driven "this AS is going away" signal. StateDown
+// flushes iff the frontend has flush-on-down enabled; the default is true,
+// because rise/fall debouncing in the health checker already absorbs flaps
+// and a fall-counted down is almost always a real outage the operator wants
+// cleared from the session table fast. Unknown / paused never flush —
+// unknown is pre-probe, and paused is an explicit drain-don't-kill signal.
 //
 // state       in active pool   not in active pool    flush
-// --------    --------------   -------------------   -----
+// --------    --------------   -------------------   ----------------
 // unknown     0                0                     no
 // up          configured       0 (standby)           no
-// down        0                0                     no
+// down        0                0                     flushOnDown
 // paused      0                0                     no
 // disabled    0                0                     yes
-func BackendEffectiveWeight(poolIdx, activePool int, state State, cfgWeight int) (weight uint8, flush bool) {
+func BackendEffectiveWeight(poolIdx, activePool int, state State, cfgWeight int, flushOnDown bool) (weight uint8, flush bool) {
     switch state {
     case StateUp:
         if poolIdx == activePool {
             return clampWeight(cfgWeight), false
         }
         return 0, false
+    case StateDown:
+        return 0, flushOnDown
     case StateDisabled:
         return 0, true
     default:
@@ -63,7 +75,7 @@ func EffectiveWeights(fe config.Frontend, states map[string]State) map[int]map[s
     for poolIdx, pool := range fe.Pools {
         out[poolIdx] = make(map[string]uint8, len(pool.Backends))
         for bName, pb := range pool.Backends {
-            w, _ := BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight)
+            w, _ := BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight, fe.FlushOnDown)
             out[poolIdx][bName] = w
         }
     }
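Reviewer note: to make the policy table concrete, a minimal sketch exercising the new signature (the function, states, and argument order are from this diff; the import path is assumed):

    package main

    import (
        "fmt"

        "example.invalid/maglev/internal/health" // module path assumed
    )

    func main() {
        // Backend in pool 0 while pool 0 is active, configured weight 100.
        w, f := health.BackendEffectiveWeight(0, 0, health.StateUp, 100, true)
        fmt.Println(w, f) // 100 false: serving normally

        // The same backend falls to StateDown with flush-on-down enabled
        // (the default): drained and existing sessions flushed.
        w, f = health.BackendEffectiveWeight(0, 0, health.StateDown, 100, true)
        fmt.Println(w, f) // 0 true

        // With flush-on-down disabled, the down transition only drains.
        w, f = health.BackendEffectiveWeight(0, 0, health.StateDown, 100, false)
        fmt.Println(w, f) // 0 false
    }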
diff --git a/internal/health/weights_test.go b/internal/health/weights_test.go
index 9936f79..776aa86 100644
--- a/internal/health/weights_test.go
+++ b/internal/health/weights_test.go
@@ -14,41 +14,48 @@ import (
 // the behavior has deliberately changed.
 func TestBackendEffectiveWeight(t *testing.T) {
     cases := []struct {
-        name       string
-        poolIdx    int
-        activePool int
-        state      State
-        cfgWeight  int
-        wantWeight uint8
-        wantFlush  bool
+        name        string
+        poolIdx     int
+        activePool  int
+        state       State
+        cfgWeight   int
+        flushOnDown bool
+        wantWeight  uint8
+        wantFlush   bool
     }{
-        {"up active w100", 0, 0, StateUp, 100, 100, false},
-        {"up active w50", 0, 0, StateUp, 50, 50, false},
-        {"up active w0", 0, 0, StateUp, 0, 0, false},
-        {"up active clamp-high", 0, 0, StateUp, 150, 100, false},
-        {"up active clamp-low", 0, 0, StateUp, -5, 0, false},
+        {"up active w100", 0, 0, StateUp, 100, true, 100, false},
+        {"up active w50", 0, 0, StateUp, 50, true, 50, false},
+        {"up active w0", 0, 0, StateUp, 0, true, 0, false},
+        {"up active clamp-high", 0, 0, StateUp, 150, true, 100, false},
+        {"up active clamp-low", 0, 0, StateUp, -5, true, 0, false},
 
-        {"up standby pool0 active=1", 0, 1, StateUp, 100, 0, false},
-        {"up standby pool1 active=0", 1, 0, StateUp, 100, 0, false},
-        {"up standby pool2 active=0", 2, 0, StateUp, 100, 0, false},
+        {"up standby pool0 active=1", 0, 1, StateUp, 100, true, 0, false},
+        {"up standby pool1 active=0", 1, 0, StateUp, 100, true, 0, false},
+        {"up standby pool2 active=0", 2, 0, StateUp, 100, true, 0, false},
 
-        {"up failover pool1 active=1", 1, 1, StateUp, 100, 100, false},
+        {"up failover pool1 active=1", 1, 1, StateUp, 100, true, 100, false},
 
-        {"unknown pool0 active=0", 0, 0, StateUnknown, 100, 0, false},
-        {"unknown pool1 active=0", 1, 0, StateUnknown, 100, 0, false},
+        {"unknown pool0 active=0", 0, 0, StateUnknown, 100, true, 0, false},
+        {"unknown pool1 active=0", 1, 0, StateUnknown, 100, true, 0, false},
 
-        {"down pool0 active=0", 0, 0, StateDown, 100, 0, false},
-        {"down pool1 active=1", 1, 1, StateDown, 100, 0, false},
+        // flush-on-down policy: default true flushes, explicit false drains.
+        {"down pool0 flushOnDown=true", 0, 0, StateDown, 100, true, 0, true},
+        {"down pool1 flushOnDown=true", 1, 1, StateDown, 100, true, 0, true},
+        {"down pool0 flushOnDown=false", 0, 0, StateDown, 100, false, 0, false},
+        {"down pool1 flushOnDown=false", 1, 1, StateDown, 100, false, 0, false},
 
-        {"paused pool0 active=0", 0, 0, StatePaused, 100, 0, false},
+        // paused never flushes — drain-don't-kill is the whole point.
+        {"paused pool0 flushOnDown=true", 0, 0, StatePaused, 100, true, 0, false},
+        {"paused pool0 flushOnDown=false", 0, 0, StatePaused, 100, false, 0, false},
 
-        {"disabled pool0 active=0", 0, 0, StateDisabled, 100, 0, true},
-        {"disabled pool1 active=1", 1, 1, StateDisabled, 100, 0, true},
+        // disabled always flushes regardless of flush-on-down policy.
+        {"disabled pool0 flushOnDown=true", 0, 0, StateDisabled, 100, true, 0, true},
+        {"disabled pool1 flushOnDown=false", 1, 1, StateDisabled, 100, false, 0, true},
     }
 
     for _, tc := range cases {
         t.Run(tc.name, func(t *testing.T) {
-            w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight)
+            w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight, tc.flushOnDown)
             if w != tc.wantWeight {
                 t.Errorf("weight: got %d, want %d", w, tc.wantWeight)
             }
+ {"disabled pool0 flushOnDown=true", 0, 0, StateDisabled, 100, true, 0, true}, + {"disabled pool1 flushOnDown=false", 1, 1, StateDisabled, 100, false, 0, true}, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight) + w, f := BackendEffectiveWeight(tc.poolIdx, tc.activePool, tc.state, tc.cfgWeight, tc.flushOnDown) if w != tc.wantWeight { t.Errorf("weight: got %d, want %d", w, tc.wantWeight) } diff --git a/internal/vpp/lbsync.go b/internal/vpp/lbsync.go index 11c0184..600ef4f 100644 --- a/internal/vpp/lbsync.go +++ b/internal/vpp/lbsync.go @@ -455,7 +455,7 @@ func desiredFromFrontend(cfg *config.Config, fe config.Frontend, src StateSource if _, already := d.ASes[addr]; already { continue } - w, flush := health.BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight) + w, flush := health.BackendEffectiveWeight(poolIdx, activePool, states[bName], pb.Weight, fe.FlushOnDown) d.ASes[addr] = desiredAS{ Address: b.Address, Weight: w, diff --git a/proto/maglev.proto b/proto/maglev.proto index a443d25..86f5210 100644 --- a/proto/maglev.proto +++ b/proto/maglev.proto @@ -212,6 +212,7 @@ message FrontendInfo { repeated PoolInfo pools = 5; string description = 6; bool src_ip_sticky = 7; // VPP LB uses src-IP-based stickiness for this VIP + bool flush_on_down = 8; // tear down existing flows when a backend goes down } message ListBackendsResponse {