Frontend flush-on-down policy; v0.9.3
Adds a per-frontend flush-on-down flag (default true) that causes maglevd to set is_flush=true on lb_as_set_weight when a backend transitions to StateDown, tearing down existing flows pinned to the dead AS instead of just draining them. Rise/fall debouncing in the health checker already absorbs single-probe flaps, so a fall-counted down is almost always a real outage — and during a real outage the client-visible "connection refused" oscillation window (where VPP keeps steering existing flows at a dead AS until retry) is a reliability regression worth closing by default. Operators who want the pre-flag drain-only behaviour can set flush-on-down: false per frontend. BackendEffectiveWeight's truth table grows one axis: StateDown now returns (0, flushOnDown); StateDisabled still unconditionally flushes; StateUnknown / StatePaused still never flush. The unit test pins all four combinations. The flag surfaces in the gRPC FrontendInfo message and in `maglevc show frontend <name>` right next to src-ip-sticky.
This commit is contained in:
@@ -1556,6 +1556,7 @@ type FrontendInfo struct {
|
||||
Pools []*PoolInfo `protobuf:"bytes,5,rep,name=pools,proto3" json:"pools,omitempty"`
|
||||
Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
|
||||
SrcIpSticky bool `protobuf:"varint,7,opt,name=src_ip_sticky,json=srcIpSticky,proto3" json:"src_ip_sticky,omitempty"` // VPP LB uses src-IP-based stickiness for this VIP
|
||||
FlushOnDown bool `protobuf:"varint,8,opt,name=flush_on_down,json=flushOnDown,proto3" json:"flush_on_down,omitempty"` // tear down existing flows when a backend goes down
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
@@ -1639,6 +1640,13 @@ func (x *FrontendInfo) GetSrcIpSticky() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *FrontendInfo) GetFlushOnDown() bool {
|
||||
if x != nil {
|
||||
return x.FlushOnDown
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type ListBackendsResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
BackendNames []string `protobuf:"bytes,1,rep,name=backend_names,json=backendNames,proto3" json:"backend_names,omitempty"`
|
||||
@@ -2596,7 +2604,7 @@ const file_proto_maglev_proto_rawDesc = "" +
|
||||
"\x10effective_weight\x18\x03 \x01(\x05R\x0feffectiveWeight\"S\n" +
|
||||
"\bPoolInfo\x12\x12\n" +
|
||||
"\x04name\x18\x01 \x01(\tR\x04name\x123\n" +
|
||||
"\bbackends\x18\x02 \x03(\v2\x17.maglev.PoolBackendInfoR\bbackends\"\xda\x01\n" +
|
||||
"\bbackends\x18\x02 \x03(\v2\x17.maglev.PoolBackendInfoR\bbackends\"\xfe\x01\n" +
|
||||
"\fFrontendInfo\x12\x12\n" +
|
||||
"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
|
||||
"\aaddress\x18\x02 \x01(\tR\aaddress\x12\x1a\n" +
|
||||
@@ -2604,7 +2612,8 @@ const file_proto_maglev_proto_rawDesc = "" +
|
||||
"\x04port\x18\x04 \x01(\rR\x04port\x12&\n" +
|
||||
"\x05pools\x18\x05 \x03(\v2\x10.maglev.PoolInfoR\x05pools\x12 \n" +
|
||||
"\vdescription\x18\x06 \x01(\tR\vdescription\x12\"\n" +
|
||||
"\rsrc_ip_sticky\x18\a \x01(\bR\vsrcIpSticky\";\n" +
|
||||
"\rsrc_ip_sticky\x18\a \x01(\bR\vsrcIpSticky\x12\"\n" +
|
||||
"\rflush_on_down\x18\b \x01(\bR\vflushOnDown\";\n" +
|
||||
"\x14ListBackendsResponse\x12#\n" +
|
||||
"\rbackend_names\x18\x01 \x03(\tR\fbackendNames\"0\n" +
|
||||
"\x18ListHealthChecksResponse\x12\x14\n" +
|
||||
|
||||
Reference in New Issue
Block a user