Add WatchEvents, enable/disable/weight RPCs, and config check
gRPC / proto
- Rename WatchBackendEvents → WatchEvents; return a stream of Event oneof (LogEvent, BackendEvent, FrontendEvent) with optional filter flags (log, log_level, backend, frontend)
- Add EnableBackend, DisableBackend, SetFrontendPoolBackendWeight RPCs
- Rename PauseResumeRequest → BackendRequest
- Add CheckConfig RPC returning ok/parse_error/semantic_error

maglevd
- Route slog through a LogBroadcaster (slog.Handler) so WatchEvents subscribers can receive structured log records independently of the daemon's own --log-level
- Add --reflection flag (default true) to toggle gRPC server reflection
- Add --check flag: validates config file and exits 0/1/2
- SIGHUP: use config.Check before applying reload; log parse vs semantic error separately; refuse reload on any error
- Rename default config path /etc/maglev → /etc/vpp-maglev

maglevc
- Add 'watch events [num <n>] [log [level <level>]] [backend] [frontend]' command; prints compact protojson, stops on any keypress or Ctrl-C; uses cbreak mode (not raw) so output post-processing is preserved
- Add 'set backend <name> enable|disable'
- Add 'set frontend <name> pool <pool> backend <name> weight <0-100>'
- Add 'config check' command

Debian packaging
- Rename service unit to vpp-maglevd.service
- Rename conffiles to /etc/default/vpp-maglev and /etc/vpp-maglev/
- Create maglevd system user/group in postinst; add to vpp group if present
- Add postrm; add adduser to Depends
This commit is contained in:
@@ -62,7 +62,7 @@ func startTestServer(t *testing.T, ctx context.Context, c *checker.Checker) (Mag
|
||||
t.Fatalf("listen: %v", err)
|
||||
}
|
||||
srv := grpc.NewServer()
|
||||
RegisterMaglevServer(srv, NewServer(ctx, c))
|
||||
RegisterMaglevServer(srv, NewServer(ctx, c, nil, ""))
|
||||
go srv.Serve(lis) //nolint:errcheck
|
||||
|
||||
conn, err := grpc.NewClient(lis.Addr().String(),
|
||||
@@ -198,7 +198,7 @@ func TestPauseResumeBackend(t *testing.T) {
|
||||
client, cleanup := startTestServer(t, ctx, c)
|
||||
defer cleanup()
|
||||
|
||||
info, err := client.PauseBackend(ctx, &PauseResumeRequest{Name: "be0"})
|
||||
info, err := client.PauseBackend(ctx, &BackendRequest{Name: "be0"})
|
||||
if err != nil {
|
||||
t.Fatalf("PauseBackend: %v", err)
|
||||
}
|
||||
@@ -206,7 +206,7 @@ func TestPauseResumeBackend(t *testing.T) {
|
||||
t.Errorf("after pause: got %q, want paused", info.State)
|
||||
}
|
||||
|
||||
info, err = client.ResumeBackend(ctx, &PauseResumeRequest{Name: "be0"})
|
||||
info, err = client.ResumeBackend(ctx, &BackendRequest{Name: "be0"})
|
||||
if err != nil {
|
||||
t.Fatalf("ResumeBackend: %v", err)
|
||||
}
|
||||
@@ -215,6 +215,78 @@ func TestPauseResumeBackend(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetFrontendPoolBackendWeight(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
c := makeTestChecker(ctx)
|
||||
client, cleanup := startTestServer(t, ctx, c)
|
||||
defer cleanup()
|
||||
|
||||
info, err := client.SetFrontendPoolBackendWeight(ctx, &SetWeightRequest{
|
||||
Frontend: "web",
|
||||
Pool: "primary",
|
||||
Backend: "be0",
|
||||
Weight: 42,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("SetFrontendPoolBackendWeight: %v", err)
|
||||
}
|
||||
if len(info.Pools) == 0 || len(info.Pools[0].Backends) == 0 {
|
||||
t.Fatal("response missing pools/backends")
|
||||
}
|
||||
if info.Pools[0].Backends[0].Weight != 42 {
|
||||
t.Errorf("weight: got %d, want 42", info.Pools[0].Backends[0].Weight)
|
||||
}
|
||||
|
||||
// Invalid weight.
|
||||
_, err = client.SetFrontendPoolBackendWeight(ctx, &SetWeightRequest{
|
||||
Frontend: "web", Pool: "primary", Backend: "be0", Weight: 101,
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error for weight 101")
|
||||
}
|
||||
|
||||
// Unknown frontend.
|
||||
_, err = client.SetFrontendPoolBackendWeight(ctx, &SetWeightRequest{
|
||||
Frontend: "nope", Pool: "primary", Backend: "be0", Weight: 50,
|
||||
})
|
||||
if err == nil {
|
||||
t.Error("expected error for unknown frontend")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnableDisableBackend(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
c := makeTestChecker(ctx)
|
||||
client, cleanup := startTestServer(t, ctx, c)
|
||||
defer cleanup()
|
||||
|
||||
info, err := client.DisableBackend(ctx, &BackendRequest{Name: "be0"})
|
||||
if err != nil {
|
||||
t.Fatalf("DisableBackend: %v", err)
|
||||
}
|
||||
if info.State != "removed" {
|
||||
t.Errorf("after disable: got %q, want removed", info.State)
|
||||
}
|
||||
if info.Enabled {
|
||||
t.Error("after disable: Enabled should be false")
|
||||
}
|
||||
|
||||
info, err = client.EnableBackend(ctx, &BackendRequest{Name: "be0"})
|
||||
if err != nil {
|
||||
t.Fatalf("EnableBackend: %v", err)
|
||||
}
|
||||
if info.State != "unknown" {
|
||||
t.Errorf("after enable: got %q, want unknown", info.State)
|
||||
}
|
||||
if !info.Enabled {
|
||||
t.Error("after enable: Enabled should be true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestListHealthChecks(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -266,7 +338,7 @@ func TestGetHealthCheckNotFound(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchBackendEventsServerShutdown(t *testing.T) {
|
||||
func TestWatchEventsServerShutdown(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@@ -277,11 +349,11 @@ func TestWatchBackendEventsServerShutdown(t *testing.T) {
|
||||
client, cleanup := startTestServer(t, srvCtx, c)
|
||||
defer cleanup()
|
||||
|
||||
stream, err := client.WatchBackendEvents(ctx, &WatchRequest{})
|
||||
stream, err := client.WatchEvents(ctx, &WatchRequest{})
|
||||
if err != nil {
|
||||
t.Fatalf("WatchBackendEvents: %v", err)
|
||||
t.Fatalf("WatchEvents: %v", err)
|
||||
}
|
||||
// Drain the initial synthetic event.
|
||||
// Drain the initial synthetic backend event.
|
||||
if _, err := stream.Recv(); err != nil {
|
||||
t.Fatalf("initial Recv: %v", err)
|
||||
}
|
||||
@@ -294,7 +366,7 @@ func TestWatchBackendEventsServerShutdown(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchBackendEvents(t *testing.T) {
|
||||
func TestWatchEventsBackend(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@@ -302,17 +374,72 @@ func TestWatchBackendEvents(t *testing.T) {
|
||||
client, cleanup := startTestServer(t, ctx, c)
|
||||
defer cleanup()
|
||||
|
||||
stream, err := client.WatchBackendEvents(ctx, &WatchRequest{})
|
||||
stream, err := client.WatchEvents(ctx, &WatchRequest{})
|
||||
if err != nil {
|
||||
t.Fatalf("WatchBackendEvents: %v", err)
|
||||
t.Fatalf("WatchEvents: %v", err)
|
||||
}
|
||||
|
||||
// Should receive the current state for be0 immediately.
|
||||
// Should receive the current state for be0 immediately as a BackendEvent.
|
||||
ev, err := stream.Recv()
|
||||
if err != nil {
|
||||
t.Fatalf("Recv: %v", err)
|
||||
}
|
||||
if ev.BackendName != "be0" {
|
||||
t.Errorf("initial event: backend=%q, want be0", ev.BackendName)
|
||||
be, ok := ev.Event.(*Event_Backend)
|
||||
if !ok {
|
||||
t.Fatalf("expected BackendEvent, got %T", ev.Event)
|
||||
}
|
||||
if be.Backend.BackendName != "be0" {
|
||||
t.Errorf("initial event: backend=%q, want be0", be.Backend.BackendName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchEventsLogOnly(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
c := makeTestChecker(ctx)
|
||||
client, cleanup := startTestServer(t, ctx, c)
|
||||
defer cleanup()
|
||||
|
||||
f := false
|
||||
stream, err := client.WatchEvents(ctx, &WatchRequest{Backend: &f, Frontend: &f})
|
||||
if err != nil {
|
||||
t.Fatalf("WatchEvents: %v", err)
|
||||
}
|
||||
|
||||
// No initial snapshot should arrive (backend disabled). Verify by checking
|
||||
// that the stream has no immediately-readable event.
|
||||
recvCh := make(chan *Event, 1)
|
||||
go func() {
|
||||
ev, _ := stream.Recv()
|
||||
recvCh <- ev
|
||||
}()
|
||||
select {
|
||||
case ev := <-recvCh:
|
||||
if _, isLog := ev.Event.(*Event_Log); !isLog {
|
||||
t.Errorf("expected only LogEvents, got %T", ev.Event)
|
||||
}
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
// expected: no backend snapshot arrived
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchEventsInvalidLogLevel(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
c := makeTestChecker(ctx)
|
||||
client, cleanup := startTestServer(t, ctx, c)
|
||||
defer cleanup()
|
||||
|
||||
// For streaming RPCs the server error arrives on the first Recv, not on the
|
||||
// initial call.
|
||||
stream, err := client.WatchEvents(ctx, &WatchRequest{LogLevel: "verbose"})
|
||||
if err != nil {
|
||||
t.Fatalf("WatchEvents: %v", err)
|
||||
}
|
||||
_, err = stream.Recv()
|
||||
if err == nil {
|
||||
t.Fatal("expected error for invalid log_level")
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user