// Copyright (c) 2026, Pim van Pelt

package vpp

import (
	"net"
	"regexp"
	"testing"

	"git.ipng.ch/ipng/vpp-maglev/internal/config"
	"git.ipng.ch/ipng/vpp-maglev/internal/health"
)

// TestParseLBVIPSnapshot pins the parser for `show lb vips verbose` output.
// The text below is a synthetic sample that mirrors format_lb_vip_detailed
// in src/plugins/lb/lb.c: a header line per VIP optionally carrying the
// src_ip_sticky token, followed by a protocol:/port: sub-line for
// non-all-port VIPs. If VPP changes this format, the test will fail loudly —
// the scrape is a temporary workaround until lb_vip_v2_dump exists.
func TestParseLBVIPSnapshot(t *testing.T) {
	text := `
 ip4-gre4 [1] 192.0.2.1/32 src_ip_sticky new_size:1024
   protocol:6 port:80
   counters:
 ip4-gre4 [2] 192.0.2.2/32 new_size:1024
   protocol:17 port:53
 ip6-gre6 [3] 2001:db8::1/128 src_ip_sticky new_size:1024
   protocol:6 port:443
 ip4-gre4 [4] 192.0.2.3/32 new_size:1024
`
	got := parseLBVIPSnapshot(text)

	want := map[vipKey]lbVIPSnapshot{
		{prefix: "192.0.2.1/32", protocol: 6, port: 80}:     {index: 1, sticky: true},
		{prefix: "192.0.2.2/32", protocol: 17, port: 53}:    {index: 2, sticky: false},
		{prefix: "2001:db8::1/128", protocol: 6, port: 443}: {index: 3, sticky: true},
		{prefix: "192.0.2.3/32", protocol: 255, port: 0}:    {index: 4, sticky: false}, // all-port VIP
	}

	if len(got) != len(want) {
		t.Errorf("got %d entries, want %d: %#v", len(got), len(want), got)
	}
	for k, v := range want {
		g, ok := got[k]
		if !ok {
			t.Errorf("missing key %+v", k)
			continue
		}
		if g != v {
			t.Errorf("key %+v: got %+v, want %+v", k, g, v)
		}
	}
}
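// sketchVIPHeaderRE and sketchVIPPortRE are a hedged illustration of one way
// the sample lines above could be matched. They are hypothetical, not the
// package's parseLBVIPSnapshot (whose real matching rules are pinned only by
// the test above): header lines carry the type, [index], prefix, an optional
// src_ip_sticky token, and new_size; sub-lines carry protocol and port.
var (
	sketchVIPHeaderRE = regexp.MustCompile(
		`^\s*(ip[46]-gre[46])\s+\[(\d+)\]\s+(\S+)\s+(?:(src_ip_sticky)\s+)?new_size:\d+`)
	sketchVIPPortRE = regexp.MustCompile(`^\s*protocol:(\d+)\s+port:(\d+)`)
)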
// fakeStateSource implements StateSource from a static map.
type fakeStateSource struct {
	cfg    *config.Config
	states map[string]health.State
}

func (f *fakeStateSource) Config() *config.Config { return f.cfg }

func (f *fakeStateSource) BackendState(name string) (health.State, bool) {
	s, ok := f.states[name]
	return s, ok
}

// TestDesiredFromFrontendFailover is the end-to-end integration test for
// priority-failover in the VPP sync path: given a frontend with two pools,
// the desired weights flip between pools based on which has any up backends.
// This exercises vpp.desiredFromFrontend, which wraps the pure helpers in
// the health package; those helpers are unit-tested separately in health.
func TestDesiredFromFrontendFailover(t *testing.T) {
	ip := func(s string) net.IP { return net.ParseIP(s).To4() }

	cfg := &config.Config{
		Backends: map[string]config.Backend{
			"p1": {Address: ip("10.0.0.1"), Enabled: true},
			"p2": {Address: ip("10.0.0.2"), Enabled: true},
			"s1": {Address: ip("10.0.0.11"), Enabled: true},
			"s2": {Address: ip("10.0.0.12"), Enabled: true},
		},
	}
	fe := config.Frontend{
		Address:  ip("192.0.2.1"),
		Protocol: "tcp",
		Port:     80,
		Pools: []config.Pool{
			{Name: "primary", Backends: map[string]config.PoolBackend{
				"p1": {Weight: 100},
				"p2": {Weight: 100},
			}},
			{Name: "fallback", Backends: map[string]config.PoolBackend{
				"s1": {Weight: 100},
				"s2": {Weight: 100},
			}},
		},
	}

	tests := []struct {
		name   string
		states map[string]health.State
		want   map[string]uint8 // backend IP → expected weight
	}{
		{
			name: "primary all up → primary serves, secondary standby",
			states: map[string]health.State{
				"p1": health.StateUp, "p2": health.StateUp,
				"s1": health.StateUp, "s2": health.StateUp,
			},
			want: map[string]uint8{
				"10.0.0.1": 100, "10.0.0.2": 100,
				"10.0.0.11": 0, "10.0.0.12": 0,
			},
		},
		{
			name: "primary 1 up → primary still serves",
			states: map[string]health.State{
				"p1": health.StateDown, "p2": health.StateUp,
				"s1": health.StateUp, "s2": health.StateUp,
			},
			want: map[string]uint8{
				"10.0.0.1": 0, "10.0.0.2": 100,
				"10.0.0.11": 0, "10.0.0.12": 0,
			},
		},
		{
			name: "primary all down → failover to secondary",
			states: map[string]health.State{
				"p1": health.StateDown, "p2": health.StateDown,
				"s1": health.StateUp, "s2": health.StateUp,
			},
			want: map[string]uint8{
				"10.0.0.1": 0, "10.0.0.2": 0,
				"10.0.0.11": 100, "10.0.0.12": 100,
			},
		},
		{
			name: "primary all disabled → failover",
			states: map[string]health.State{
				"p1": health.StateDisabled, "p2": health.StateDisabled,
				"s1": health.StateUp, "s2": health.StateUp,
			},
			want: map[string]uint8{
				"10.0.0.1": 0, "10.0.0.2": 0,
				"10.0.0.11": 100, "10.0.0.12": 100,
			},
		},
		{
			name: "everything down → all zero, no serving",
			states: map[string]health.State{
				"p1": health.StateDown, "p2": health.StateDown,
				"s1": health.StateDown, "s2": health.StateDown,
			},
			want: map[string]uint8{
				"10.0.0.1": 0, "10.0.0.2": 0,
				"10.0.0.11": 0, "10.0.0.12": 0,
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			src := &fakeStateSource{cfg: cfg, states: tc.states}
			d := desiredFromFrontend(cfg, fe, src)
			for addr, wantW := range tc.want {
				got, ok := d.ASes[addr]
				if !ok {
					t.Errorf("%s: missing from desired set", addr)
					continue
				}
				if got.Weight != wantW {
					t.Errorf("%s: weight got %d, want %d", addr, got.Weight, wantW)
				}
			}
			if len(d.ASes) != len(tc.want) {
				t.Errorf("got %d ASes, want %d", len(d.ASes), len(tc.want))
			}
		})
	}
}
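// sketchFailoverWeights is a hedged, self-contained restatement of the
// priority-failover rule the test above exercises. It is NOT the package's
// implementation (the real helpers live in internal/health and are wrapped
// by desiredFromFrontend); the type and function names here are hypothetical
// stand-ins. The rule: walk pools in priority order, the first pool with at
// least one up backend serves (up members at their configured weight, non-up
// members at 0), and every backend in every other pool stays at weight 0.
type sketchPool struct {
	weights map[string]uint8 // backend name → configured weight
}

func sketchFailoverWeights(pools []sketchPool, up map[string]bool) map[string]uint8 {
	out := map[string]uint8{}
	serving := -1
	for i, p := range pools {
		for name := range p.weights {
			out[name] = 0 // default: standby, not serving
			if serving < 0 && up[name] {
				serving = i // first pool with any up backend wins
			}
		}
	}
	if serving >= 0 {
		// Only the serving pool's up members carry weight.
		for name, w := range pools[serving].weights {
			if up[name] {
				out[name] = w
			}
		}
	}
	return out
}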
// TestDesiredFromFrontendSharedBackend exercises the exact shape of
// maglev.yaml: two frontends that share three backends across primary
// and fallback pools with different per-pool weights. The key
// invariants being pinned:
//
//   - Each frontend's desiredFromFrontend must read its own
//     per-pool-membership weights, never leaking weights from a sibling
//     frontend's pool config.
//   - When the primary pool has at least one backend up, the fallback
//     pool's backends must all be weight=0 (standby).
//   - When every primary-pool backend is non-up (down / paused /
//     disabled), failover kicks in: the fallback pool's backends get
//     their configured weights, and primary-pool backends stay at 0.
//
// Frontends modelled below:
//
//	nginx-ip4-http:
//	  primary:  nginx0-frggh0 w=10, nginx0-nlams0 w=100
//	  fallback: nginx0-chlzn0 w=100
//
//	nginx-ip6-https:
//	  primary:  nginx0-frggh0 w=100
//	  fallback: nginx0-nlams0 w=100, nginx0-chlzn0 w=100
//
// Note that nginx0-frggh0 is configured with weight 10 in the ip4
// primary but 100 in the ip6 primary — this is the exact crossed
// configuration that the user reported as producing weight=10 in the
// ip6 VIP (a regression).
func TestDesiredFromFrontendSharedBackend(t *testing.T) {
	ip := func(s string) net.IP { return net.ParseIP(s).To4() }

	frggh := "198.19.6.76"
	nlams := "198.19.4.118"
	chlzn := "198.19.6.167"

	cfg := &config.Config{
		Backends: map[string]config.Backend{
			"nginx0-frggh0": {Address: ip(frggh), Enabled: true},
			"nginx0-nlams0": {Address: ip(nlams), Enabled: true},
			"nginx0-chlzn0": {Address: ip(chlzn), Enabled: true},
		},
	}
	feIP4 := config.Frontend{
		Address:  ip("198.19.0.254"),
		Protocol: "tcp",
		Port:     80,
		Pools: []config.Pool{
			{Name: "primary", Backends: map[string]config.PoolBackend{
				"nginx0-frggh0": {Weight: 10},
				"nginx0-nlams0": {Weight: 100},
			}},
			{Name: "fallback", Backends: map[string]config.PoolBackend{
				"nginx0-chlzn0": {Weight: 100},
			}},
		},
	}
	feIP6 := config.Frontend{
		Address:  net.ParseIP("2001:db8::1"),
		Protocol: "tcp",
		Port:     443,
		Pools: []config.Pool{
			{Name: "primary", Backends: map[string]config.PoolBackend{
				"nginx0-frggh0": {Weight: 100},
			}},
			{Name: "fallback", Backends: map[string]config.PoolBackend{
				"nginx0-nlams0": {Weight: 100},
				"nginx0-chlzn0": {Weight: 100},
			}},
		},
	}

	type want struct {
		ip4 map[string]uint8
		ip6 map[string]uint8
	}
	tests := []struct {
		name   string
		states map[string]health.State
		want   want
	}{
		{
			name: "all up — each primary serves with its own weights",
			states: map[string]health.State{
				"nginx0-frggh0": health.StateUp,
				"nginx0-nlams0": health.StateUp,
				"nginx0-chlzn0": health.StateUp,
			},
			want: want{
				ip4: map[string]uint8{frggh: 10, nlams: 100, chlzn: 0},
				ip6: map[string]uint8{frggh: 100, nlams: 0, chlzn: 0},
			},
		},
		{
			name: "frggh0 disabled — ip4 primary still served by nlams0, ip6 fails over to fallback",
			states: map[string]health.State{
				"nginx0-frggh0": health.StateDisabled,
				"nginx0-nlams0": health.StateUp,
				"nginx0-chlzn0": health.StateUp,
			},
			want: want{
				// ip4 primary still has nlams0 up, so stays on primary;
				// frggh0 is in primary but disabled → 0.
				ip4: map[string]uint8{frggh: 0, nlams: 100, chlzn: 0},
				// ip6 primary has only frggh0 (disabled) → fallback
				// pool activates and both of its backends get their
				// configured weights.
				ip6: map[string]uint8{frggh: 0, nlams: 100, chlzn: 100},
			},
		},
		{
			name: "frggh0 paused — same failover shape as disabled for ip6",
			states: map[string]health.State{
				"nginx0-frggh0": health.StatePaused,
				"nginx0-nlams0": health.StateUp,
				"nginx0-chlzn0": health.StateUp,
			},
			want: want{
				ip4: map[string]uint8{frggh: 0, nlams: 100, chlzn: 0},
				ip6: map[string]uint8{frggh: 0, nlams: 100, chlzn: 100},
			},
		},
		{
			name: "frggh0 down — same failover shape as disabled for ip6",
			states: map[string]health.State{
				"nginx0-frggh0": health.StateDown,
				"nginx0-nlams0": health.StateUp,
				"nginx0-chlzn0": health.StateUp,
			},
			want: want{
				ip4: map[string]uint8{frggh: 0, nlams: 100, chlzn: 0},
				ip6: map[string]uint8{frggh: 0, nlams: 100, chlzn: 100},
			},
		},
		{
			name: "ip4 primary all down → failover to chlzn0; ip6 fails over too",
			states: map[string]health.State{
				"nginx0-frggh0": health.StateDown,
				"nginx0-nlams0": health.StateDown,
				"nginx0-chlzn0": health.StateUp,
			},
			want: want{
				// ip4 primary has nothing up → fallback activates.
				ip4: map[string]uint8{frggh: 0, nlams: 0, chlzn: 100},
				// ip6 primary has frggh0 (down) → fallback activates
				// too; nlams0 is in ip6 fallback but down, chlzn0 is
				// up and carries traffic.
				ip6: map[string]uint8{frggh: 0, nlams: 0, chlzn: 100},
			},
		},
		{
			name: "all backends down → everyone zero",
			states: map[string]health.State{
				"nginx0-frggh0": health.StateDown,
				"nginx0-nlams0": health.StateDown,
				"nginx0-chlzn0": health.StateDown,
			},
			want: want{
				ip4: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
				ip6: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
			},
		},
		{
			name: "all backends disabled → everyone zero (and flushed)",
			states: map[string]health.State{
				"nginx0-frggh0": health.StateDisabled,
				"nginx0-nlams0": health.StateDisabled,
				"nginx0-chlzn0": health.StateDisabled,
			},
			want: want{
				ip4: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
				ip6: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
			},
		},
		{
			name: "frggh0 re-enabled and up — each frontend returns to its own configured weight (regression)",
			states: map[string]health.State{
				"nginx0-frggh0": health.StateUp,
				"nginx0-nlams0": health.StateUp,
				"nginx0-chlzn0": health.StateUp,
			},
			want: want{
				// This is the specific regression the user reported:
				// after a disable/enable cycle, the ip6 VIP should
				// return to weight=100 for frggh0 (its own pool's
				// configured weight), not 10 (ip4's weight).
				ip4: map[string]uint8{frggh: 10, nlams: 100, chlzn: 0},
				ip6: map[string]uint8{frggh: 100, nlams: 0, chlzn: 0},
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			src := &fakeStateSource{cfg: cfg, states: tc.states}

			d4 := desiredFromFrontend(cfg, feIP4, src)
			for addr, w := range tc.want.ip4 {
				got, ok := d4.ASes[addr]
				if !ok {
					t.Errorf("ip4: %s missing from desired set", addr)
					continue
				}
				if got.Weight != w {
					t.Errorf("ip4: %s weight got %d, want %d", addr, got.Weight, w)
				}
			}
			if len(d4.ASes) != len(tc.want.ip4) {
				t.Errorf("ip4: got %d ASes, want %d", len(d4.ASes), len(tc.want.ip4))
			}

			d6 := desiredFromFrontend(cfg, feIP6, src)
			for addr, w := range tc.want.ip6 {
				got, ok := d6.ASes[addr]
				if !ok {
					t.Errorf("ip6: %s missing from desired set", addr)
					continue
				}
				if got.Weight != w {
					t.Errorf("ip6: %s weight got %d, want %d", addr, got.Weight, w)
				}
			}
			if len(d6.ASes) != len(tc.want.ip6) {
				t.Errorf("ip6: got %d ASes, want %d", len(d6.ASes), len(tc.want.ip6))
			}

			// Also exercise desiredFromConfig (the batch version used
			// by the 30-second periodic SyncLBStateAll): it iterates
			// every frontend in cfg and must produce the same
			// per-frontend weights as desiredFromFrontend called
			// directly. A bug where one frontend's pool config leaks
			// into another would show up here too.
			cfgBatch := &config.Config{
				Backends: cfg.Backends,
				Frontends: map[string]config.Frontend{
					"nginx-ip4-http":  feIP4,
					"nginx-ip6-https": feIP6,
				},
			}
			batch := desiredFromConfig(cfgBatch, src)
			byAddr := map[string]desiredVIP{}
			for _, d := range batch {
				byAddr[d.Prefix.IP.String()] = d
			}
			if d, ok := byAddr["198.19.0.254"]; !ok {
				t.Errorf("batch: ip4 VIP 198.19.0.254 missing")
			} else {
				for addr, w := range tc.want.ip4 {
					if got := d.ASes[addr].Weight; got != w {
						t.Errorf("batch ip4: %s weight got %d, want %d", addr, got, w)
					}
				}
			}
			if d, ok := byAddr["2001:db8::1"]; !ok {
				t.Errorf("batch: ip6 VIP 2001:db8::1 missing")
			} else {
				for addr, w := range tc.want.ip6 {
					if got := d.ASes[addr].Weight; got != w {
						t.Errorf("batch ip6: %s weight got %d, want %d", addr, got, w)
					}
				}
			}
		})
	}
}
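// sketchBatchDesired is a hedged sketch of the invariant the batch block
// above pins: desiredFromConfig should be observationally equivalent to
// mapping desiredFromFrontend over every frontend in the config. It assumes
// desiredFromFrontend returns a desiredVIP (consistent with how this file
// uses both functions) and is illustrative only; it is not the package's
// desiredFromConfig, and iteration order over the Frontends map is
// deliberately left unspecified.
func sketchBatchDesired(cfg *config.Config, src StateSource) []desiredVIP {
	out := make([]desiredVIP, 0, len(cfg.Frontends))
	for _, fe := range cfg.Frontends {
		out = append(out, desiredFromFrontend(cfg, fe, src))
	}
	return out
}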