Files
vpp-maglev/internal/vpp/lbsync_test.go
Pim van Pelt 6d78921edd Restart-neutral VPP LB sync; deterministic AS ordering; maglevt cadence; v0.9.5
Three reliability fixes bundled with docs updates.

Restart-neutral VPP LB sync via a startup warmup window
(internal/vpp/warmup.go). Before this, a maglevd restart would
immediately issue SyncLBStateAll with every backend still in
StateUnknown — mapped through BackendEffectiveWeight to weight
0 — and VPP would black-hole all new flows until the checker's
rise counters caught up, several seconds later. The new warmup
tracker owns a process-wide state machine gated by two config
knobs: vpp.lb.startup-min-delay (default 5s) is an absolute
hands-off window during which neither the periodic sync loop
nor the per-transition reconciler touches VPP; vpp.lb.
startup-max-delay (default 30s) is the watchdog for the per-VIP
release phase that runs between the two delays, releasing each frontend
as soon as every backend it references reaches a non-Unknown
state. At max-delay a final SyncLBStateAll runs for any stragglers
still in Unknown. Config reload does not reset the clock. Both
delays can be set to 0 to disable the warmup entirely. The
reconciler's suppressed-during-warmup events log at DEBUG so
operators can still see them with --log-level debug. Unit tests
cover the tracker state machine, allBackendsKnown precondition,
and the zero-delay escape hatch.
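
A minimal sketch of how such a warmup gate can be structured. The
names here (warmupTracker, AllowVIP, the warmupHold/Release/Done
phases) are illustrative rather than the actual internal/vpp/warmup.go
API, and the real tracker also drives the final SyncLBStateAll at
max-delay:

package vpp

import (
	"sync"
	"time"
)

type warmupPhase int

const (
	warmupHold    warmupPhase = iota // inside startup-min-delay: no VPP writes at all
	warmupRelease                    // between the two delays: per-VIP release as backends become known
	warmupDone                       // past startup-max-delay: normal operation
)

// warmupTracker is a sketch of the process-wide gate described above.
// started is set once at process start, so a config reload does not
// reset the clock.
type warmupTracker struct {
	mu       sync.Mutex
	started  time.Time
	minDelay time.Duration   // vpp.lb.startup-min-delay
	maxDelay time.Duration   // vpp.lb.startup-max-delay
	released map[string]bool // VIP key -> released during the release phase
}

func newWarmupTracker(minDelay, maxDelay time.Duration) *warmupTracker {
	return &warmupTracker{
		started:  time.Now(),
		minDelay: minDelay,
		maxDelay: maxDelay,
		released: map[string]bool{},
	}
}

// phase derives the current phase from elapsed wall-clock time; setting
// both delays to zero lands in warmupDone immediately (the escape hatch).
func (w *warmupTracker) phase(now time.Time) warmupPhase {
	elapsed := now.Sub(w.started)
	switch {
	case elapsed >= w.maxDelay:
		return warmupDone
	case elapsed >= w.minDelay:
		return warmupRelease
	default:
		return warmupHold
	}
}

// AllowVIP reports whether a sync touching this VIP may proceed right
// now. allKnown is the allBackendsKnown precondition: every backend the
// VIP references has left StateUnknown.
func (w *warmupTracker) AllowVIP(vip string, allKnown bool, now time.Time) bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	switch w.phase(now) {
	case warmupDone:
		return true
	case warmupHold:
		return false
	default: // warmupRelease
		if allKnown {
			w.released[vip] = true
		}
		return w.released[vip]
	}
}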

Deterministic AS iteration in VPP LB sync. reconcileVIP and
recreateVIP now issue their lb_as_add_del / lb_as_set_weight
calls in numeric IP order (IPv4 before IPv6, ascending within
each family) via a new sortedIPKeys helper, instead of Go map
iteration order. VPP's LB plugin breaks per-bucket ties in the
Maglev lookup table by insertion position in its internal AS
vec, so without a stable call order two maglevd instances on
the same config could push identical AS sets into VPP in
different orders and produce divergent new-flow tables. Numeric
sort is used in preference to lexicographic so the sync log
stays human-readable: string order would place 10.0.0.10 before
10.0.0.2, and v6 addresses suffer the same inversion. Unit tests
cover empty,
single, v4/v6 numeric vs lexicographic, v4-before-v6 grouping,
a 1000-iteration stability loop against Go's randomised map
iteration, insertion-order invariance, and the desiredAS
call-site type.
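
For reference, a plausible shape for sortedIPKeys and the comparator
it delegates to, consistent with what the tests in this file assert
(numeric order within each family, IPv4 before IPv6, nil first,
generic over the map's value type); the real code in this package may
differ in detail:

package vpp

import (
	"bytes"
	"net"
	"sort"
)

// compareIPNumeric orders nil first, then IPv4 (ascending), then IPv6
// (ascending), comparing canonical 16-byte forms numerically.
func compareIPNumeric(a, b net.IP) int {
	switch {
	case a == nil && b == nil:
		return 0
	case a == nil:
		return -1
	case b == nil:
		return 1
	}
	a4, b4 := a.To4(), b.To4()
	if (a4 != nil) != (b4 != nil) { // exactly one is IPv4: v4 sorts before v6
		if a4 != nil {
			return -1
		}
		return 1
	}
	return bytes.Compare(a.To16(), b.To16())
}

// sortedIPKeys returns the map's keys in the stable numeric order above,
// so lb_as_add_del / lb_as_set_weight calls are issued in the same
// sequence on every maglevd instance regardless of Go's randomised map
// iteration.
func sortedIPKeys[V any](m map[string]V) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool {
		return compareIPNumeric(net.ParseIP(keys[i]), net.ParseIP(keys[j])) < 0
	})
	return keys
}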

maglevt interval fix. runProbeLoop used to sleep the full
jittered interval after every probe, so a 100ms --interval
with a 30ms probe actually produced a 130ms period. The sleep
now subtracts result.Duration so cadence matches the flag.
When a probe overruns the interval, the sleep clamps to zero and
the next probe fires immediately; missed cycles are not replayed,
so a slow backend isn't flooded with back-to-back probes at the
moment it's already struggling.
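
A rough sketch of the corrected cadence; the parameter names and the
probe package are hypothetical, and only the subtract-and-clamp step
is taken from the description above:

package probe

import "time"

// runProbeLoop is a sketch: probeOnce runs one probe and returns how
// long it took (the result.Duration mentioned above); jitter applies
// the per-cycle jitter to the configured --interval.
func runProbeLoop(interval time.Duration, jitter func(time.Duration) time.Duration,
	probeOnce func() time.Duration, stop <-chan struct{}) {
	for {
		elapsed := probeOnce()
		// Sleep only the remainder of the jittered interval, so the probe
		// period tracks --interval instead of interval plus probe time.
		sleep := jitter(interval) - elapsed
		if sleep < 0 {
			// Overrun: clamp to zero and fire the next probe immediately,
			// without queueing catch-up probes for the cycles that were missed.
			sleep = 0
		}
		select {
		case <-stop:
			return
		case <-time.After(sleep):
		}
	}
}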

Docs. config-guide now documents flush-on-down and the new
startup-min-delay / startup-max-delay knobs; user-guide's
maglevd section explains the restart-neutrality property, the
three warmup phases, and the relevant slog lines operators
should watch for during a bounce.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-15 12:53:42 +02:00


// Copyright (c) 2026, Pim van Pelt <pim@ipng.ch>
package vpp
import (
"net"
"testing"
"git.ipng.ch/ipng/vpp-maglev/internal/config"
"git.ipng.ch/ipng/vpp-maglev/internal/health"
)
// TestParseLBVIPSnapshot pins the parser for `show lb vips verbose` output.
// The text below is a synthetic sample that mirrors format_lb_vip_detailed
// in src/plugins/lb/lb.c: a header line per VIP optionally carrying the
// src_ip_sticky token, followed by a protocol:/port: sub-line for non all-
// port VIPs. If VPP changes this format the test will fail loudly — the
// scrape is a temporary workaround until lb_vip_v2_dump exists.
func TestParseLBVIPSnapshot(t *testing.T) {
text := ` ip4-gre4 [1] 192.0.2.1/32 src_ip_sticky
new_size:1024
protocol:6 port:80
counters:
ip4-gre4 [2] 192.0.2.2/32
new_size:1024
protocol:17 port:53
ip6-gre6 [3] 2001:db8::1/128 src_ip_sticky
new_size:1024
protocol:6 port:443
ip4-gre4 [4] 192.0.2.3/32
new_size:1024
`
got := parseLBVIPSnapshot(text)
want := map[vipKey]lbVIPSnapshot{
{prefix: "192.0.2.1/32", protocol: 6, port: 80}: {index: 1, sticky: true},
{prefix: "192.0.2.2/32", protocol: 17, port: 53}: {index: 2, sticky: false},
{prefix: "2001:db8::1/128", protocol: 6, port: 443}: {index: 3, sticky: true},
{prefix: "192.0.2.3/32", protocol: 255, port: 0}: {index: 4, sticky: false}, // all-port VIP
}
if len(got) != len(want) {
t.Errorf("got %d entries, want %d: %#v", len(got), len(want), got)
}
for k, v := range want {
g, ok := got[k]
if !ok {
t.Errorf("missing key %+v", k)
continue
}
if g != v {
t.Errorf("key %+v: got %+v, want %+v", k, g, v)
}
}
}
// fakeStateSource implements StateSource from a static map.
type fakeStateSource struct {
cfg *config.Config
states map[string]health.State
}
func (f *fakeStateSource) Config() *config.Config { return f.cfg }
func (f *fakeStateSource) BackendState(name string) (health.State, bool) {
s, ok := f.states[name]
return s, ok
}
// TestDesiredFromFrontendFailover is the end-to-end integration test for
// priority-failover in the VPP sync path: given a frontend with two pools,
// the desired weights flip between pools based on which has any up backends.
// This exercises vpp.desiredFromFrontend which wraps the pure helpers in
// the health package; those helpers are unit-tested separately in health.
func TestDesiredFromFrontendFailover(t *testing.T) {
ip := func(s string) net.IP { return net.ParseIP(s).To4() }
cfg := &config.Config{
Backends: map[string]config.Backend{
"p1": {Address: ip("10.0.0.1"), Enabled: true},
"p2": {Address: ip("10.0.0.2"), Enabled: true},
"s1": {Address: ip("10.0.0.11"), Enabled: true},
"s2": {Address: ip("10.0.0.12"), Enabled: true},
},
}
fe := config.Frontend{
Address: ip("192.0.2.1"),
Protocol: "tcp",
Port: 80,
Pools: []config.Pool{
{Name: "primary", Backends: map[string]config.PoolBackend{
"p1": {Weight: 100},
"p2": {Weight: 100},
}},
{Name: "fallback", Backends: map[string]config.PoolBackend{
"s1": {Weight: 100},
"s2": {Weight: 100},
}},
},
}
tests := []struct {
name string
states map[string]health.State
want map[string]uint8 // backend IP → expected weight
}{
{
name: "primary all up → primary serves, secondary standby",
states: map[string]health.State{
"p1": health.StateUp, "p2": health.StateUp,
"s1": health.StateUp, "s2": health.StateUp,
},
want: map[string]uint8{
"10.0.0.1": 100, "10.0.0.2": 100,
"10.0.0.11": 0, "10.0.0.12": 0,
},
},
{
name: "primary 1 up → primary still serves",
states: map[string]health.State{
"p1": health.StateDown, "p2": health.StateUp,
"s1": health.StateUp, "s2": health.StateUp,
},
want: map[string]uint8{
"10.0.0.1": 0, "10.0.0.2": 100,
"10.0.0.11": 0, "10.0.0.12": 0,
},
},
{
name: "primary all down → failover to secondary",
states: map[string]health.State{
"p1": health.StateDown, "p2": health.StateDown,
"s1": health.StateUp, "s2": health.StateUp,
},
want: map[string]uint8{
"10.0.0.1": 0, "10.0.0.2": 0,
"10.0.0.11": 100, "10.0.0.12": 100,
},
},
{
name: "primary all disabled → failover",
states: map[string]health.State{
"p1": health.StateDisabled, "p2": health.StateDisabled,
"s1": health.StateUp, "s2": health.StateUp,
},
want: map[string]uint8{
"10.0.0.1": 0, "10.0.0.2": 0,
"10.0.0.11": 100, "10.0.0.12": 100,
},
},
{
name: "everything down → all zero, no serving",
states: map[string]health.State{
"p1": health.StateDown, "p2": health.StateDown,
"s1": health.StateDown, "s2": health.StateDown,
},
want: map[string]uint8{
"10.0.0.1": 0, "10.0.0.2": 0,
"10.0.0.11": 0, "10.0.0.12": 0,
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
src := &fakeStateSource{cfg: cfg, states: tc.states}
d := desiredFromFrontend(cfg, fe, src)
for addr, wantW := range tc.want {
got, ok := d.ASes[addr]
if !ok {
t.Errorf("%s: missing from desired set", addr)
continue
}
if got.Weight != wantW {
t.Errorf("%s: weight got %d, want %d", addr, got.Weight, wantW)
}
}
if len(d.ASes) != len(tc.want) {
t.Errorf("got %d ASes, want %d", len(d.ASes), len(tc.want))
}
})
}
}
// TestDesiredFromFrontendSharedBackend exercises the exact shape of
// maglev.yaml: two frontends that share three backends across primary
// and fallback pools with different per-pool weights. The key
// invariants being pinned:
//
// - Each frontend's desiredFromFrontend must read its own
// per-pool-membership weights, never leaking weights from a sibling
// frontend's pool config.
// - When the primary pool has at least one backend up, the fallback
// pool's backends must all be weight=0 (standby).
// - When every primary-pool backend is non-up (down / paused /
// disabled), failover kicks in: the fallback pool's backends get
// their configured weights, and primary-pool backends stay at 0.
//
// Frontends modelled below:
//
// nginx-ip4-http:
// primary: nginx0-frggh0 w=10, nginx0-nlams0 w=100
// fallback: nginx0-chlzn0 w=100
//
// nginx-ip6-https:
// primary: nginx0-frggh0 w=100
// fallback: nginx0-nlams0 w=100, nginx0-chlzn0 w=100
//
// Note that nginx0-frggh0 is configured with weight 10 in the ip4
// primary but 100 in the ip6 primary — this is the exact crossed
// configuration that the user reported as producing weight=10 in the
// ip6 VIP (a regression).
func TestDesiredFromFrontendSharedBackend(t *testing.T) {
ip := func(s string) net.IP { return net.ParseIP(s).To4() }
frggh := "198.19.6.76"
nlams := "198.19.4.118"
chlzn := "198.19.6.167"
cfg := &config.Config{
Backends: map[string]config.Backend{
"nginx0-frggh0": {Address: ip(frggh), Enabled: true},
"nginx0-nlams0": {Address: ip(nlams), Enabled: true},
"nginx0-chlzn0": {Address: ip(chlzn), Enabled: true},
},
}
feIP4 := config.Frontend{
Address: ip("198.19.0.254"),
Protocol: "tcp",
Port: 80,
Pools: []config.Pool{
{Name: "primary", Backends: map[string]config.PoolBackend{
"nginx0-frggh0": {Weight: 10},
"nginx0-nlams0": {Weight: 100},
}},
{Name: "fallback", Backends: map[string]config.PoolBackend{
"nginx0-chlzn0": {Weight: 100},
}},
},
}
feIP6 := config.Frontend{
Address: net.ParseIP("2001:db8::1"),
Protocol: "tcp",
Port: 443,
Pools: []config.Pool{
{Name: "primary", Backends: map[string]config.PoolBackend{
"nginx0-frggh0": {Weight: 100},
}},
{Name: "fallback", Backends: map[string]config.PoolBackend{
"nginx0-nlams0": {Weight: 100},
"nginx0-chlzn0": {Weight: 100},
}},
},
}
type want struct {
ip4 map[string]uint8
ip6 map[string]uint8
}
tests := []struct {
name string
states map[string]health.State
want want
}{
{
name: "all up — each primary serves with its own weights",
states: map[string]health.State{
"nginx0-frggh0": health.StateUp,
"nginx0-nlams0": health.StateUp,
"nginx0-chlzn0": health.StateUp,
},
want: want{
ip4: map[string]uint8{frggh: 10, nlams: 100, chlzn: 0},
ip6: map[string]uint8{frggh: 100, nlams: 0, chlzn: 0},
},
},
{
name: "frggh0 disabled — ip4 primary still served by nlams0, ip6 fails over to fallback",
states: map[string]health.State{
"nginx0-frggh0": health.StateDisabled,
"nginx0-nlams0": health.StateUp,
"nginx0-chlzn0": health.StateUp,
},
want: want{
// ip4 primary still has nlams0 up, so stays on primary;
// frggh0 is in primary but disabled → 0.
ip4: map[string]uint8{frggh: 0, nlams: 100, chlzn: 0},
// ip6 primary has only frggh0 (disabled) → fallback
// pool activates and both of its backends get their
// configured weights.
ip6: map[string]uint8{frggh: 0, nlams: 100, chlzn: 100},
},
},
{
name: "frggh0 paused — same failover shape as disabled for ip6",
states: map[string]health.State{
"nginx0-frggh0": health.StatePaused,
"nginx0-nlams0": health.StateUp,
"nginx0-chlzn0": health.StateUp,
},
want: want{
ip4: map[string]uint8{frggh: 0, nlams: 100, chlzn: 0},
ip6: map[string]uint8{frggh: 0, nlams: 100, chlzn: 100},
},
},
{
name: "frggh0 down — same failover shape as disabled for ip6",
states: map[string]health.State{
"nginx0-frggh0": health.StateDown,
"nginx0-nlams0": health.StateUp,
"nginx0-chlzn0": health.StateUp,
},
want: want{
ip4: map[string]uint8{frggh: 0, nlams: 100, chlzn: 0},
ip6: map[string]uint8{frggh: 0, nlams: 100, chlzn: 100},
},
},
{
name: "ip4 primary all down → failover to chlzn0; ip6 unaffected",
states: map[string]health.State{
"nginx0-frggh0": health.StateDown,
"nginx0-nlams0": health.StateDown,
"nginx0-chlzn0": health.StateUp,
},
want: want{
// ip4 primary has nothing up → fallback activates.
ip4: map[string]uint8{frggh: 0, nlams: 0, chlzn: 100},
// ip6 primary has frggh0 (down) → fallback activates
// too; nlams0 is in ip6 fallback but down, chlzn0 is
// up and carries traffic.
ip6: map[string]uint8{frggh: 0, nlams: 0, chlzn: 100},
},
},
{
name: "all backends down → everyone zero",
states: map[string]health.State{
"nginx0-frggh0": health.StateDown,
"nginx0-nlams0": health.StateDown,
"nginx0-chlzn0": health.StateDown,
},
want: want{
ip4: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
ip6: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
},
},
{
name: "all backends disabled → everyone zero (and flushed)",
states: map[string]health.State{
"nginx0-frggh0": health.StateDisabled,
"nginx0-nlams0": health.StateDisabled,
"nginx0-chlzn0": health.StateDisabled,
},
want: want{
ip4: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
ip6: map[string]uint8{frggh: 0, nlams: 0, chlzn: 0},
},
},
{
name: "frggh0 re-enabled and up — each frontend returns to its own configured weight (regression)",
states: map[string]health.State{
"nginx0-frggh0": health.StateUp,
"nginx0-nlams0": health.StateUp,
"nginx0-chlzn0": health.StateUp,
},
want: want{
// This is the specific regression the user reported:
// after a disable/enable cycle, the ip6 VIP should
// return to weight=100 for frggh0 (its own pool's
// configured weight), not 10 (ip4's weight).
ip4: map[string]uint8{frggh: 10, nlams: 100, chlzn: 0},
ip6: map[string]uint8{frggh: 100, nlams: 0, chlzn: 0},
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
src := &fakeStateSource{cfg: cfg, states: tc.states}
d4 := desiredFromFrontend(cfg, feIP4, src)
for addr, w := range tc.want.ip4 {
got, ok := d4.ASes[addr]
if !ok {
t.Errorf("ip4: %s missing from desired set", addr)
continue
}
if got.Weight != w {
t.Errorf("ip4: %s weight got %d, want %d", addr, got.Weight, w)
}
}
if len(d4.ASes) != len(tc.want.ip4) {
t.Errorf("ip4: got %d ASes, want %d", len(d4.ASes), len(tc.want.ip4))
}
d6 := desiredFromFrontend(cfg, feIP6, src)
for addr, w := range tc.want.ip6 {
got, ok := d6.ASes[addr]
if !ok {
t.Errorf("ip6: %s missing from desired set", addr)
continue
}
if got.Weight != w {
t.Errorf("ip6: %s weight got %d, want %d", addr, got.Weight, w)
}
}
if len(d6.ASes) != len(tc.want.ip6) {
t.Errorf("ip6: got %d ASes, want %d", len(d6.ASes), len(tc.want.ip6))
}
// Also exercise desiredFromConfig (the batch version used
// by the 30-second periodic SyncLBStateAll): it iterates
// every frontend in cfg and must produce the same
// per-frontend weights as desiredFromFrontend called
// directly. A bug where one frontend's pool config leaks
// into another would show up here too.
cfgBatch := &config.Config{
Backends: cfg.Backends,
Frontends: map[string]config.Frontend{
"nginx-ip4-http": feIP4,
"nginx-ip6-https": feIP6,
},
}
batch := desiredFromConfig(cfgBatch, src)
byAddr := map[string]desiredVIP{}
for _, d := range batch {
byAddr[d.Prefix.IP.String()] = d
}
if d := byAddr["198.19.0.254"]; true {
for addr, w := range tc.want.ip4 {
if got := d.ASes[addr].Weight; got != w {
t.Errorf("batch ip4: %s weight got %d, want %d", addr, got, w)
}
}
}
if d := byAddr["2001:db8::1"]; true {
for addr, w := range tc.want.ip6 {
if got := d.ASes[addr].Weight; got != w {
t.Errorf("batch ip6: %s weight got %d, want %d", addr, got, w)
}
}
}
})
}
}
// TestSortedIPKeysDeterministic pins the iteration-order helper that
// reconcileVIP and recreateVIP use to sequence their lb_as_add_del
// calls. The Maglev lookup table in VPP's LB plugin breaks per-bucket
// ties by the order ASes sit in its internal vec, which is just the
// order maglevd issued add calls — so if this helper ever stops
// returning a total, stable ordering, two independent maglevd
// instances on the same config can silently program different
// new-flow tables.
//
// Sort order is numeric (by the parsed net.IP), not lexicographic.
// The specific cases that a string sort would get wrong and a
// numeric sort must get right:
//
// - 10.0.0.2 < 10.0.0.10 (string sort puts "10" before "2")
// - 2001:db8::2 < 2001:db8::10 (same issue in v6)
// - all IPv4 before all IPv6 (operator-friendly grouping)
func TestSortedIPKeysDeterministic(t *testing.T) {
t.Run("empty", func(t *testing.T) {
got := sortedIPKeys(map[string]int{})
if len(got) != 0 {
t.Errorf("empty map: got %v, want []", got)
}
})
t.Run("single entry", func(t *testing.T) {
got := sortedIPKeys(map[string]int{"10.0.0.1": 1})
if len(got) != 1 || got[0] != "10.0.0.1" {
t.Errorf("got %v, want [10.0.0.1]", got)
}
})
t.Run("v4 numeric order beats string order", func(t *testing.T) {
// The headline bug: "10.0.0.10" < "10.0.0.2" lexicographically
// because '1' < '2'. Numeric sort must place 2 before 10.
m := map[string]int{
"10.0.0.10": 1,
"10.0.0.2": 2,
"10.0.0.1": 3,
"10.0.0.11": 4,
}
got := sortedIPKeys(m)
want := []string{"10.0.0.1", "10.0.0.2", "10.0.0.10", "10.0.0.11"}
if len(got) != len(want) {
t.Fatalf("got %v, want %v", got, want)
}
for i := range want {
if got[i] != want[i] {
t.Errorf("pos %d: got %q, want %q", i, got[i], want[i])
}
}
})
t.Run("v6 numeric order beats string order", func(t *testing.T) {
// Same bug in v6: "2001:db8::10" < "2001:db8::2" lexicographically.
// The To16() canonical byte form handles both compressed and
// expanded forms correctly.
m := map[string]int{
"2001:db8::10": 1,
"2001:db8::2": 2,
"2001:db8::1": 3,
}
got := sortedIPKeys(m)
want := []string{"2001:db8::1", "2001:db8::2", "2001:db8::10"}
for i := range want {
if got[i] != want[i] {
t.Errorf("pos %d: got %q, want %q", i, got[i], want[i])
}
}
})
t.Run("v4 before v6", func(t *testing.T) {
// Mixed-family frontends: the operator-friendly order is
// the v4 block before the v6 block, each sorted numerically
// within its family.
m := map[string]int{
"2001:db8::1": 1,
"10.0.0.2": 2,
"10.0.0.1": 3,
"fe80::1": 4,
"192.168.0.1": 5,
}
got := sortedIPKeys(m)
want := []string{
"10.0.0.1", "10.0.0.2", "192.168.0.1",
"2001:db8::1", "fe80::1",
}
if len(got) != len(want) {
t.Fatalf("got %v, want %v", got, want)
}
for i := range want {
if got[i] != want[i] {
t.Errorf("pos %d: got %q, want %q", i, got[i], want[i])
}
}
})
t.Run("repeated calls produce identical sequence", func(t *testing.T) {
// Core determinism property: Go's map iteration is randomised,
// but sortedIPKeys must normalise it. Run the helper many
// times and compare every result to the first — if the
// normalisation ever breaks we'll see a divergence well within
// the loop count.
m := map[string]int{
"10.0.0.5": 1, "10.0.0.3": 2, "10.0.0.11": 3,
"10.0.0.2": 4, "10.0.0.4": 5, "10.0.0.20": 6,
}
first := sortedIPKeys(m)
for i := 0; i < 1000; i++ {
got := sortedIPKeys(m)
if len(got) != len(first) {
t.Fatalf("iter %d: length drift: got %v, first %v", i, got, first)
}
for j := range first {
if got[j] != first[j] {
t.Fatalf("iter %d pos %d: got %q, first %q", i, j, got[j], first[j])
}
}
}
})
t.Run("insertion order does not matter", func(t *testing.T) {
// A map built by inserting keys in ascending order must
// produce the same result as one built in descending order.
// Both go through the same normalisation.
asc := map[string]int{}
for _, k := range []string{"10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.10", "10.0.0.11"} {
asc[k] = 0
}
desc := map[string]int{}
for _, k := range []string{"10.0.0.11", "10.0.0.10", "10.0.0.3", "10.0.0.2", "10.0.0.1"} {
desc[k] = 0
}
gotAsc := sortedIPKeys(asc)
gotDesc := sortedIPKeys(desc)
if len(gotAsc) != len(gotDesc) {
t.Fatalf("length mismatch: asc %v, desc %v", gotAsc, gotDesc)
}
for i := range gotAsc {
if gotAsc[i] != gotDesc[i] {
t.Errorf("pos %d: asc %q, desc %q", i, gotAsc[i], gotDesc[i])
}
}
})
t.Run("desiredAS map", func(t *testing.T) {
// Exercise the actual call-site type: map[string]desiredAS.
// If the generic helper ever loses its type parameterisation
// this catches it at compile time (the call would fail).
m := map[string]desiredAS{
"10.0.0.9": {Address: net.ParseIP("10.0.0.9"), Weight: 100},
"10.0.0.11": {Address: net.ParseIP("10.0.0.11"), Weight: 100},
"10.0.0.5": {Address: net.ParseIP("10.0.0.5"), Weight: 50},
"10.0.0.1": {Address: net.ParseIP("10.0.0.1"), Weight: 25},
}
got := sortedIPKeys(m)
want := []string{"10.0.0.1", "10.0.0.5", "10.0.0.9", "10.0.0.11"}
for i := range want {
if got[i] != want[i] {
t.Errorf("pos %d: got %q, want %q", i, got[i], want[i])
}
}
})
}
// TestCompareIPNumeric pins the ordering comparator that sortedIPKeys
// delegates to. Split out so the v4/v6 boundary and nil-safety logic
// have named failure modes rather than being buried in the map-based
// subtests.
func TestCompareIPNumeric(t *testing.T) {
cases := []struct {
name string
a, b net.IP
want int // -1, 0, +1 (sign of compareIPNumeric)
}{
{"v4 numeric asc", net.ParseIP("10.0.0.2"), net.ParseIP("10.0.0.10"), -1},
{"v4 numeric desc", net.ParseIP("10.0.0.10"), net.ParseIP("10.0.0.2"), 1},
{"v4 equal", net.ParseIP("10.0.0.1"), net.ParseIP("10.0.0.1"), 0},
{"v6 numeric asc", net.ParseIP("2001:db8::2"), net.ParseIP("2001:db8::10"), -1},
{"v6 numeric desc", net.ParseIP("2001:db8::10"), net.ParseIP("2001:db8::2"), 1},
{"v4 before v6", net.ParseIP("192.168.0.1"), net.ParseIP("2001:db8::1"), -1},
{"v6 after v4", net.ParseIP("2001:db8::1"), net.ParseIP("192.168.0.1"), 1},
{"nil before v4", nil, net.ParseIP("10.0.0.1"), -1},
{"v4 after nil", net.ParseIP("10.0.0.1"), nil, 1},
{"nil equal nil", nil, nil, 0},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
got := compareIPNumeric(tc.a, tc.b)
sign := func(x int) int {
switch {
case x < 0:
return -1
case x > 0:
return 1
}
return 0
}
if sign(got) != tc.want {
t.Errorf("got %d (sign %d), want sign %d", got, sign(got), tc.want)
}
})
}
}