// SPDX-License-Identifier: Apache-2.0

package vpp

import (
	"net"
	"testing"
	"time"

	"git.ipng.ch/ipng/vpp-maglev/internal/config"
	"git.ipng.ch/ipng/vpp-maglev/internal/health"
)

// TestWarmupTrackerBasic pins the state-machine transitions the tracker
|
|
// owns: inMinDelay, isReleased, tryRelease, finishAll, isAllDone. Time
|
|
// is manipulated by backdating startAt — there is no hidden global
|
|
// clock, so a test can assert behaviour at any simulated point in
|
|
// the [0, maxDelay] window by rewinding startAt before each step.
|
|
func TestWarmupTrackerBasic(t *testing.T) {
|
|
t.Run("min-delay gates everything", func(t *testing.T) {
|
|
w := newWarmupTracker()
|
|
w.configure(5*time.Second, 30*time.Second)
|
|
|
|
// At t=0 we are inside min-delay.
|
|
if !w.inMinDelay() {
|
|
t.Error("t=0: expected inMinDelay=true")
|
|
}
|
|
if w.isReleased("fe1") {
|
|
t.Error("t=0: expected isReleased(fe1)=false")
|
|
}
|
|
if w.tryRelease("fe1") {
|
|
t.Error("t=0: tryRelease should fail inside min-delay")
|
|
}
|
|
if w.isAllDone() {
|
|
t.Error("t=0: expected isAllDone=false")
|
|
}
|
|
})
|
|
|
|
t.Run("per-VIP release after min-delay", func(t *testing.T) {
|
|
w := newWarmupTracker()
|
|
w.configure(5*time.Second, 30*time.Second)
|
|
// Simulate t=10s by backdating startAt 10s into the past.
|
|
w.startAt = time.Now().Add(-10 * time.Second)
|
|
|
|
if w.inMinDelay() {
|
|
t.Error("t=10s: expected inMinDelay=false")
|
|
}
|
|
if w.isReleased("fe1") {
|
|
t.Error("t=10s: fe1 should not be released yet")
|
|
}
|
|
if !w.tryRelease("fe1") {
|
|
t.Error("t=10s: tryRelease(fe1) should succeed")
|
|
}
|
|
if !w.isReleased("fe1") {
|
|
t.Error("after tryRelease: isReleased(fe1) should be true")
|
|
}
|
|
// A second call returns true (already-released path).
|
|
if !w.tryRelease("fe1") {
|
|
t.Error("second tryRelease(fe1) should return true (already released)")
|
|
}
|
|
// Other VIPs are independent.
|
|
if w.isReleased("fe2") {
|
|
t.Error("releasing fe1 should not affect fe2")
|
|
}
|
|
})
|
|
|
|
t.Run("finishAll opens all gates", func(t *testing.T) {
|
|
w := newWarmupTracker()
|
|
w.configure(5*time.Second, 30*time.Second)
|
|
|
|
// Inside min-delay: fe1 not released.
|
|
if w.isReleased("fe1") {
|
|
t.Error("pre-finishAll: fe1 should not be released")
|
|
}
|
|
w.finishAll()
|
|
if !w.isAllDone() {
|
|
t.Error("post-finishAll: isAllDone should be true")
|
|
}
|
|
if !w.isReleased("fe1") {
|
|
t.Error("post-finishAll: every frontend should be released")
|
|
}
|
|
if w.inMinDelay() {
|
|
t.Error("post-finishAll: inMinDelay should be false")
|
|
}
|
|
// Second call is idempotent.
|
|
w.finishAll()
|
|
})
|
|
|
|
t.Run("doneChan closes on finishAll", func(t *testing.T) {
|
|
w := newWarmupTracker()
|
|
w.configure(5*time.Second, 30*time.Second)
|
|
|
|
select {
|
|
case <-w.doneChan():
|
|
t.Fatal("doneChan should not be readable before finishAll")
|
|
default:
|
|
}
|
|
w.finishAll()
|
|
select {
|
|
case <-w.doneChan():
|
|
// expected
|
|
case <-time.After(100 * time.Millisecond):
|
|
t.Fatal("doneChan should be readable after finishAll")
|
|
}
|
|
})
|
|
|
|
t.Run("configure is idempotent after first call", func(t *testing.T) {
|
|
w := newWarmupTracker()
|
|
w.configure(5*time.Second, 30*time.Second)
|
|
// Reload with shorter delays should be a no-op so a config
|
|
// reload mid-warmup doesn't move the goalposts.
|
|
w.configure(1*time.Second, 2*time.Second)
|
|
if w.minDelay != 5*time.Second {
|
|
t.Errorf("minDelay got %v, want %v", w.minDelay, 5*time.Second)
|
|
}
|
|
if w.maxDelay != 30*time.Second {
|
|
t.Errorf("maxDelay got %v, want %v", w.maxDelay, 30*time.Second)
|
|
}
|
|
})
|
|
}
// staticStateSource is a minimal StateSource for allBackendsKnown tests.
// It holds a fixed config and a static backend-state map; BackendState
// returns (state, true) for entries in the map and (StateUnknown, false)
// for everything else — matching how checker.Checker would report a
// backend that isn't under its watch.
type staticStateSource struct {
	cfg    *config.Config          // returned verbatim by Config()
	states map[string]health.State // backend name → last reported state
}
// Config returns the fixed configuration the source was constructed with.
func (s *staticStateSource) Config() *config.Config {
	return s.cfg
}
func (s *staticStateSource) BackendState(name string) (health.State, bool) {
|
|
st, ok := s.states[name]
|
|
return st, ok
|
|
}
// TestAllBackendsKnown pins the per-VIP release precondition. A
|
|
// frontend is eligible for release during the warmup phase iff every
|
|
// backend it references has reached a non-Unknown state. StateDown,
|
|
// StatePaused, and StateDisabled all count as "known" — the property
|
|
// is "has the checker reported at least once", not "is healthy".
|
|
func TestAllBackendsKnown(t *testing.T) {
|
|
ip := func(s string) net.IP { return net.ParseIP(s) }
|
|
fe := config.Frontend{
|
|
Address: ip("192.0.2.1"),
|
|
Protocol: "tcp",
|
|
Port: 80,
|
|
Pools: []config.Pool{
|
|
{Name: "primary", Backends: map[string]config.PoolBackend{
|
|
"b1": {Weight: 100},
|
|
"b2": {Weight: 100},
|
|
}},
|
|
{Name: "fallback", Backends: map[string]config.PoolBackend{
|
|
"b3": {Weight: 100},
|
|
}},
|
|
},
|
|
}
|
|
|
|
cases := []struct {
|
|
name string
|
|
states map[string]health.State
|
|
want bool
|
|
}{
|
|
{
|
|
name: "all up → known",
|
|
states: map[string]health.State{
|
|
"b1": health.StateUp, "b2": health.StateUp, "b3": health.StateUp,
|
|
},
|
|
want: true,
|
|
},
|
|
{
|
|
name: "mixed up/down/disabled → known",
|
|
// All of up, down, paused, disabled are "non-Unknown" and
|
|
// therefore counts as known; allBackendsKnown is about
|
|
// "has the checker reported once", not about health.
|
|
states: map[string]health.State{
|
|
"b1": health.StateDown, "b2": health.StateDisabled, "b3": health.StatePaused,
|
|
},
|
|
want: true,
|
|
},
|
|
{
|
|
name: "one still unknown → not known",
|
|
states: map[string]health.State{
|
|
"b1": health.StateUp, "b2": health.StateUnknown, "b3": health.StateUp,
|
|
},
|
|
want: false,
|
|
},
|
|
{
|
|
name: "backend not reported at all (checker doesn't know it) → not known",
|
|
states: map[string]health.State{"b1": health.StateUp, "b2": health.StateUp},
|
|
want: false,
|
|
},
|
|
{
|
|
name: "fallback pool unknown → not known",
|
|
states: map[string]health.State{
|
|
"b1": health.StateUp, "b2": health.StateUp, "b3": health.StateUnknown,
|
|
},
|
|
want: false,
|
|
},
|
|
}
|
|
|
|
for _, tc := range cases {
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
src := &staticStateSource{cfg: &config.Config{}, states: tc.states}
|
|
got := allBackendsKnown(fe, src)
|
|
if got != tc.want {
|
|
t.Errorf("got %v, want %v", got, tc.want)
|
|
}
|
|
})
|
|
}
|
|
}
// TestWarmupTrackerZeroDelays pins the "warmup disabled" escape hatch:
|
|
// with both delays set to 0, tryRelease succeeds immediately and
|
|
// isReleased returns true for every frontend without needing any
|
|
// state transitions. This is the configuration an operator picks
|
|
// when they'd rather take the brief startup black-hole than wait
|
|
// out the warmup — typically for tests and dev setups.
|
|
func TestWarmupTrackerZeroDelays(t *testing.T) {
|
|
w := newWarmupTracker()
|
|
w.configure(0, 0)
|
|
if w.inMinDelay() {
|
|
t.Error("min=0: expected inMinDelay=false immediately")
|
|
}
|
|
if !w.tryRelease("fe1") {
|
|
t.Error("min=0: tryRelease should succeed immediately")
|
|
}
|
|
if !w.isReleased("fe1") {
|
|
t.Error("min=0: isReleased(fe1) should be true after tryRelease")
|
|
}
|
|
}