Files
vpp-maglev/internal/grpcapi/server_test.go
Pim van Pelt d612086a5f Pools, CLI, versioning, Debian packaging, HTTPS fix
- Replaced flat `backends: [...]` list on frontends with an ordered `pools:`
  list; each pool has a name and a map of backends with per-pool weights (0–100,
  default 100). Pools express priority: first pool with a healthy backend wins.
- Removed global backend weight (was on the backend, now lives in the pool).
- Config validation enforces non-empty pools, non-empty pool names, weight
  range, and consistent address families across all pools of a frontend.

- Added `PoolBackendInfo { name, weight }` and changed `PoolInfo.backends` from
  `repeated string` to `repeated PoolBackendInfo` so weights are visible over
  the API.

- Full interactive shell with readline, tab completion, and `?` inline help.
- Command tree parser (Walk) handles fixed keywords and dynamic slot nodes;
  prefix matching with exact-match priority.
- Commands: `show version/frontends/frontend/backends/backend/healthchecks/
  healthcheck`, `set backend <name> pause|resume`, `quit`/`exit`.
- `show frontend` output is hierarchical (pools → backends) with per-backend
  weights and `[disabled]` notation; pool section uses fixed-width formatting
  so ANSI color codes don't corrupt tabwriter alignment.
- `-color` flag (default true) wraps static field labels in dark-blue ANSI;
  works correctly with tabwriter because all labels carry identical-length
  escape sequences.

- `cmd/version.go` package holds `version`, `commit`, `date` vars set at build
  time via `-ldflags -X`.
- `make build` / `make build-amd64` / `make build-arm64` all inject
  `VERSION=0.1.1`, `COMMIT_HASH` (from `git rev-parse --short HEAD`), and
  `DATE` (UTC ISO-8601).
- `maglevc` prints version on interactive startup and exposes `show version`.
- `maglevd` logs version/commit/date at startup; `-version` flag prints and exits.

- `doHTTPProbe` was building a `https://` target URL even though TLS was already
  applied to the connection inside `inNetns`. `http.Transport` then wrapped the
  connection in a second TLS layer, producing "http: server gave HTTP response
  to HTTPS client". Fixed by always using `http://` in the target URL.
- Added `TestHTTPSProbe` using `httptest.NewTLSServer` to cover the full path.

- New `docs/user-guide.md`: maglevd flags/signals, maglevc commands, shell
  completion, and command-tree parser walkthrough.
- New `docs/healthchecks.md`: state machine, rise/fall model, probe intervals,
  all transition events with log examples.
- Updated `docs/config-guide.md`: pools design, removed global weight from
  backends, updated all examples.
- Updated `README.md`: packaging table, build paths, corrected binary locations
  (`/usr/sbin/maglevd`), config filename (`.yaml`).

- `debian/` directory contains `control.in`, `maglevd.service`, `default.maglev`,
  `maglev.yaml` (example config), `conffiles`, `postinst`, `prerm`.
- `debian/build-deb.sh` stages a package tree and calls `dpkg-deb`; emits
  `build/vpp-maglev_<version>~<commit>_<arch>.deb`.
- Cross-compiles for amd64 and arm64 in one `make pkg-deb` invocation.
- `maglevd` installed to `/usr/sbin/`, `maglevc` to `/usr/bin/`.
- Service reads `MAGLEV_CONFIG` from `/etc/default/maglev`
  (default: `/etc/maglev/maglev.yaml`).
- Man pages `maglevd(8)` and `maglevc(1)` live in `docs/` and are gzipped into
  the package.
- All build output goes to `build/<arch>/`; `build/` is gitignored.
2026-04-11 12:18:17 +02:00

319 lines
8.3 KiB
Go

// Copyright (c) 2026, Pim van Pelt <pim@ipng.ch>
package grpcapi
import (
"context"
"net"
"testing"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"git.ipng.ch/ipng/vpp-maglev/internal/checker"
"git.ipng.ch/ipng/vpp-maglev/internal/config"
"git.ipng.ch/ipng/vpp-maglev/internal/health"
)
// makeTestChecker builds a Checker from a minimal one-frontend,
// one-backend config and starts its Run loop on ctx. The healthcheck
// interval is an hour so no probe fires while a test is running.
func makeTestChecker(ctx context.Context) *checker.Checker {
	hc := config.HealthCheck{
		Type:     "icmp",
		Interval: time.Hour, // long interval: probes won't fire during tests
		Timeout:  time.Second,
		Fall:     3,
		Rise:     2,
	}
	be := config.Backend{
		Address:     net.ParseIP("10.0.0.2"),
		HealthCheck: "icmp",
		Enabled:     true,
	}
	fe := config.Frontend{
		Address:  net.ParseIP("192.0.2.1"),
		Protocol: "tcp",
		Port:     80,
		Pools: []config.Pool{
			{Name: "primary", Backends: map[string]config.PoolBackend{
				"be0": {Weight: 100},
			}},
		},
	}
	cfg := &config.Config{
		HealthChecker: config.HealthCheckerConfig{TransitionHistory: 5},
		HealthChecks:  map[string]config.HealthCheck{"icmp": hc},
		Backends:      map[string]config.Backend{"be0": be},
		Frontends:     map[string]config.Frontend{"web": fe},
	}
	chk := checker.New(cfg)
	go chk.Run(ctx) //nolint:errcheck
	// Give the Run goroutine a moment to spin up its workers.
	time.Sleep(10 * time.Millisecond)
	return chk
}
// startTestServer spins up a gRPC Maglev server on an ephemeral loopback
// port and returns a connected client plus a cleanup func that tears
// down both the connection and the server.
func startTestServer(t *testing.T, ctx context.Context, c *checker.Checker) (MaglevClient, func()) {
	t.Helper()
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen: %v", err)
	}
	server := grpc.NewServer()
	RegisterMaglevServer(server, NewServer(ctx, c))
	go server.Serve(listener) //nolint:errcheck
	conn, err := grpc.NewClient(listener.Addr().String(),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		t.Fatalf("dial: %v", err)
	}
	teardown := func() {
		conn.Close()
		server.Stop()
	}
	return NewMaglevClient(conn), teardown
}
// TestListFrontends checks that the single configured frontend is listed.
func TestListFrontends(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	chk := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, chk)
	defer cleanup()

	resp, err := client.ListFrontends(ctx, &ListFrontendsRequest{})
	if err != nil {
		t.Fatalf("ListFrontends: %v", err)
	}
	names := resp.FrontendNames
	if len(names) != 1 || names[0] != "web" {
		t.Errorf("ListFrontends: got %v, want [web]", resp.FrontendNames)
	}
}
// TestGetFrontend checks the address, port, pool, backend name, and
// backend weight returned for the configured "web" frontend.
//
// Fix: the structural length checks on Pools and Backends previously used
// t.Errorf and fell through, so an empty slice would panic with an
// index-out-of-range on the subsequent accesses instead of failing the
// test cleanly. They now use t.Fatalf to stop before dereferencing.
func TestGetFrontend(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, c)
	defer cleanup()

	info, err := client.GetFrontend(ctx, &GetFrontendRequest{Name: "web"})
	if err != nil {
		t.Fatalf("GetFrontend: %v", err)
	}
	if info.Address != "192.0.2.1" {
		t.Errorf("GetFrontend address: got %q, want 192.0.2.1", info.Address)
	}
	if info.Port != 80 {
		t.Errorf("GetFrontend port: got %d, want 80", info.Port)
	}
	// Fatal on shape mismatches: the checks below index into these slices.
	if len(info.Pools) != 1 || info.Pools[0].Name != "primary" {
		t.Fatalf("GetFrontend pools: got %v, want [{primary [be0]}]", info.Pools)
	}
	if len(info.Pools[0].Backends) != 1 || info.Pools[0].Backends[0].Name != "be0" {
		t.Fatalf("GetFrontend pools[0].backends: got %v, want [{be0 100}]", info.Pools[0].Backends)
	}
	if info.Pools[0].Backends[0].Weight != 100 {
		t.Errorf("GetFrontend pools[0].backends[0].weight: got %d, want 100", info.Pools[0].Backends[0].Weight)
	}
}
func TestGetFrontendNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := makeTestChecker(ctx)
client, cleanup := startTestServer(t, ctx, c)
defer cleanup()
_, err := client.GetFrontend(ctx, &GetFrontendRequest{Name: "nope"})
if err == nil {
t.Error("expected error for unknown frontend")
}
}
// TestListBackends checks that the single configured backend is listed.
func TestListBackends(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	chk := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, chk)
	defer cleanup()

	resp, err := client.ListBackends(ctx, &ListBackendsRequest{})
	if err != nil {
		t.Fatalf("ListBackends: %v", err)
	}
	names := resp.BackendNames
	if len(names) != 1 || names[0] != "be0" {
		t.Errorf("ListBackends: got %v, want [be0]", resp.BackendNames)
	}
}
func TestGetBackend(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := makeTestChecker(ctx)
client, cleanup := startTestServer(t, ctx, c)
defer cleanup()
info, err := client.GetBackend(ctx, &GetBackendRequest{Name: "be0"})
if err != nil {
t.Fatalf("GetBackend: %v", err)
}
if info.State != health.StateUnknown.String() {
t.Errorf("initial state: got %q, want unknown", info.State)
}
if !info.Enabled {
t.Error("expected enabled=true")
}
if info.Healthcheck != "icmp" {
t.Errorf("healthcheck: got %q, want icmp", info.Healthcheck)
}
}
func TestGetBackendNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := makeTestChecker(ctx)
client, cleanup := startTestServer(t, ctx, c)
defer cleanup()
_, err := client.GetBackend(ctx, &GetBackendRequest{Name: "nope"})
if err == nil {
t.Error("expected error for unknown backend")
}
}
// TestPauseResumeBackend checks that pausing moves a backend into the
// paused state and resuming returns it to unknown.
func TestPauseResumeBackend(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	chk := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, chk)
	defer cleanup()

	paused, err := client.PauseBackend(ctx, &PauseResumeRequest{Name: "be0"})
	if err != nil {
		t.Fatalf("PauseBackend: %v", err)
	}
	if paused.State != health.StatePaused.String() {
		t.Errorf("after pause: got %q, want paused", paused.State)
	}

	resumed, err := client.ResumeBackend(ctx, &PauseResumeRequest{Name: "be0"})
	if err != nil {
		t.Fatalf("ResumeBackend: %v", err)
	}
	// Resuming drops the backend back to unknown until a probe runs.
	if resumed.State != health.StateUnknown.String() {
		t.Errorf("after resume: got %q, want unknown", resumed.State)
	}
}
// TestListHealthChecks checks that the single configured healthcheck is
// listed.
func TestListHealthChecks(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	chk := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, chk)
	defer cleanup()

	resp, err := client.ListHealthChecks(ctx, &ListHealthChecksRequest{})
	if err != nil {
		t.Fatalf("ListHealthChecks: %v", err)
	}
	names := resp.Names
	if len(names) != 1 || names[0] != "icmp" {
		t.Errorf("ListHealthChecks: got %v, want [icmp]", resp.Names)
	}
}
func TestGetHealthCheck(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := makeTestChecker(ctx)
client, cleanup := startTestServer(t, ctx, c)
defer cleanup()
info, err := client.GetHealthCheck(ctx, &GetHealthCheckRequest{Name: "icmp"})
if err != nil {
t.Fatalf("GetHealthCheck: %v", err)
}
if info.Type != "icmp" {
t.Errorf("type: got %q, want icmp", info.Type)
}
if info.Fall != 3 || info.Rise != 2 {
t.Errorf("fall/rise: got %d/%d, want 3/2", info.Fall, info.Rise)
}
}
func TestGetHealthCheckNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c := makeTestChecker(ctx)
client, cleanup := startTestServer(t, ctx, c)
defer cleanup()
_, err := client.GetHealthCheck(ctx, &GetHealthCheckRequest{Name: "nope"})
if err == nil {
t.Error("expected error for unknown healthcheck")
}
}
// TestWatchBackendEventsServerShutdown checks that an open event stream
// terminates once the server's context is cancelled.
func TestWatchBackendEventsServerShutdown(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	chk := makeTestChecker(ctx)
	// The server gets its own child context so it can be torn down while
	// the client's context stays alive.
	srvCtx, srvCancel := context.WithCancel(ctx)
	client, cleanup := startTestServer(t, srvCtx, chk)
	defer cleanup()

	stream, err := client.WatchBackendEvents(ctx, &WatchRequest{})
	if err != nil {
		t.Fatalf("WatchBackendEvents: %v", err)
	}
	// The first event is the synthetic current-state event; discard it.
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("initial Recv: %v", err)
	}
	// Cancelling the server context must close the stream.
	srvCancel()
	if _, err := stream.Recv(); err == nil {
		t.Fatal("expected stream to close after server shutdown, got nil error")
	}
}
// TestWatchBackendEvents checks that a new watcher immediately receives
// a synthetic event carrying the current state of be0.
func TestWatchBackendEvents(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	chk := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, chk)
	defer cleanup()

	stream, err := client.WatchBackendEvents(ctx, &WatchRequest{})
	if err != nil {
		t.Fatalf("WatchBackendEvents: %v", err)
	}
	// The first event reflects be0's current state without any probe firing.
	ev, err := stream.Recv()
	if err != nil {
		t.Fatalf("Recv: %v", err)
	}
	if ev.BackendName != "be0" {
		t.Errorf("initial event: backend=%q, want be0", ev.BackendName)
	}
}