Add WatchEvents, enable/disable/weight RPCs, and config check

gRPC / proto
- Rename WatchBackendEvents → WatchEvents; return a stream of Event
  oneof (LogEvent, BackendEvent, FrontendEvent) with optional filter
  flags (log, log_level, backend, frontend)
- Add EnableBackend, DisableBackend, SetFrontendPoolBackendWeight RPCs
- Rename PauseResumeRequest → BackendRequest
- Add CheckConfig RPC returning ok/parse_error/semantic_error

maglevd
- Route slog through a LogBroadcaster (slog.Handler) so WatchEvents
  subscribers can receive structured log records independently of the
  daemon's own --log-level
- Add --reflection flag (default true) to toggle gRPC server reflection
- Add --check flag: validates config file and exits 0/1/2
- SIGHUP: use config.Check before applying reload; log parse vs semantic
  error separately; refuse reload on any error
- Rename default config path /etc/maglev → /etc/vpp-maglev

maglevc
- Add 'watch events [num <n>] [log [level <level>]] [backend] [frontend]'
  command; prints compact protojson, stops on any keypress or Ctrl-C;
  uses cbreak mode (not raw) so output post-processing is preserved
- Add 'set backend <name> enable|disable'
- Add 'set frontend <name> pool <pool> backend <name> weight <0-100>'
- Add 'config check' command

Debian packaging
- Rename service unit to vpp-maglevd.service
- Rename conffiles to /etc/default/vpp-maglev and /etc/vpp-maglev/
- Create maglevd system user/group in postinst; add to vpp group if present
- Add postrm; add adduser to Depends
This commit is contained in:
2026-04-11 16:42:11 +02:00
parent d612086a5f
commit 58391f5463
26 changed files with 1969 additions and 400 deletions

View File

@@ -4,6 +4,7 @@ package checker
import (
"context"
"fmt"
"log/slog"
"net"
"sort"
@@ -42,6 +43,7 @@ type worker struct {
// Each backend is probed exactly once, regardless of how many frontends
// reference it.
type Checker struct {
runCtx context.Context // set in Run; used by EnableBackend to start new goroutines
cfg *config.Config
mu sync.RWMutex
workers map[string]*worker // keyed by backend name
@@ -67,6 +69,7 @@ func (c *Checker) Run(ctx context.Context) error {
go c.fanOut(ctx)
c.mu.Lock()
c.runCtx = ctx // safe: held under mu before any EnableBackend call can read it
names := activeBackendNames(c.cfg)
maxHistory := c.cfg.HealthChecker.TransitionHistory
for i, name := range names {
@@ -167,6 +170,36 @@ func (c *Checker) GetFrontend(name string) (config.Frontend, bool) {
return v, ok
}
// SetFrontendPoolBackendWeight updates the weight of a backend within a named
// pool of a frontend. It returns the updated frontend and a descriptive error
// if the frontend, pool, or backend is not found or the weight is out of range.
func (c *Checker) SetFrontendPoolBackendWeight(frontendName, poolName, backendName string, weight int) (config.Frontend, error) {
	// Validate the weight before taking the lock; nothing to roll back.
	if weight < 0 || weight > 100 {
		return config.Frontend{}, fmt.Errorf("weight %d out of range [0, 100]", weight)
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	fe, ok := c.cfg.Frontends[frontendName]
	if !ok {
		return config.Frontend{}, fmt.Errorf("frontend %q not found", frontendName)
	}
	for idx := range fe.Pools {
		if fe.Pools[idx].Name != poolName {
			continue
		}
		entry, found := fe.Pools[idx].Backends[backendName]
		if !found {
			return config.Frontend{}, fmt.Errorf("backend %q not found in pool %q", backendName, poolName)
		}
		// Write the updated entry back through the pool map and persist the
		// frontend into the live config so later reads observe the change.
		entry.Weight = weight
		fe.Pools[idx].Backends[backendName] = entry
		c.cfg.Frontends[frontendName] = fe
		slog.Info("frontend-pool-weight", "frontend", frontendName, "pool", poolName, "backend", backendName, "weight", weight)
		return fe, nil
	}
	return config.Frontend{}, fmt.Errorf("pool %q not found in frontend %q", poolName, frontendName)
}
// ListHealthChecks returns the names of all configured health checks, sorted.
func (c *Checker) ListHealthChecks() []string {
c.mu.RLock()
@@ -278,6 +311,59 @@ func (c *Checker) ResumeBackend(name string) (BackendSnapshot, bool) {
return BackendSnapshot{Health: w.backend, Config: w.entry}, true
}
// DisableBackend stops health checking for a backend and removes it from active
// rotation. The worker entry stays in the map so the backend remains visible
// via GetBackend and can be re-enabled later with EnableBackend.
func (c *Checker) DisableBackend(name string) (BackendSnapshot, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	w, ok := c.workers[name]
	if !ok {
		return BackendSnapshot{}, false
	}
	// Already disabled: report the current snapshot without re-emitting events.
	if !w.entry.Enabled {
		return BackendSnapshot{Health: w.backend, Config: w.entry}, true
	}
	// Transition the backend to removed, tell subscribers, then stop the probe
	// goroutine and record the disabled state in the live config.
	t := w.backend.Remove(c.cfg.HealthChecker.TransitionHistory)
	slog.Info("backend-disable", "backend", name)
	c.emitForBackend(name, w.backend.Address, t, c.cfg.Frontends)
	w.cancel()
	w.entry.Enabled = false
	if be, found := c.cfg.Backends[name]; found {
		be.Enabled = false
		c.cfg.Backends[name] = be
	}
	return BackendSnapshot{Health: w.backend, Config: w.entry}, true
}
// EnableBackend re-enables a previously disabled backend. A fresh probe
// goroutine is started and the backend re-enters StateUnknown.
func (c *Checker) EnableBackend(name string) (BackendSnapshot, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	w, ok := c.workers[name]
	if !ok {
		// Unknown backend name: nothing to enable.
		return BackendSnapshot{}, false
	}
	if w.entry.Enabled {
		// Already enabled: return the current snapshot unchanged.
		return BackendSnapshot{Health: w.backend, Config: w.entry}, true
	}
	entry := w.entry
	entry.Enabled = true
	// Mirror the enabled flag into the live config so config reads agree.
	if b, ok := c.cfg.Backends[name]; ok {
		b.Enabled = true
		c.cfg.Backends[name] = b
	}
	maxHistory := c.cfg.HealthChecker.TransitionHistory
	// NOTE(review): no presence check on the health-check lookup — a missing
	// key yields a zero value; presumably validated at config load. Confirm.
	hc := c.cfg.HealthChecks[entry.HealthCheck]
	slog.Info("backend-enable", "backend", name)
	// runCtx was stored by Run (under c.mu), so the new worker is tied to the
	// daemon's lifetime rather than the caller's RPC context.
	c.startWorker(c.runCtx, name, entry, hc, 0, 1, maxHistory)
	// startWorker replaced c.workers[name]; re-read it for the snapshot.
	nw := c.workers[name]
	// NOTE(review): assumes startWorker seeds at least one transition —
	// Transitions[0] would panic otherwise. TODO confirm.
	c.emitForBackend(name, nw.backend.Address, nw.backend.Transitions[0], c.cfg.Frontends)
	return BackendSnapshot{Health: nw.backend, Config: nw.entry}, true
}
// ---- internal --------------------------------------------------------------
// startWorker creates a Backend and launches a probe goroutine.

View File

@@ -246,6 +246,98 @@ func TestSubscribe(t *testing.T) {
}
}
func TestSetFrontendPoolBackendWeight(t *testing.T) {
	cfg := makeTestConfig(time.Hour, 3, 2)
	c := New(cfg)

	// Valid weight change.
	fe, err := c.SetFrontendPoolBackendWeight("web", "primary", "be0", 42)
	if err != nil {
		t.Fatalf("SetFrontendPoolBackendWeight: %v", err)
	}
	if w := fe.Pools[0].Backends["be0"].Weight; w != 42 {
		t.Errorf("weight: got %d, want 42", w)
	}

	// The change must be persisted in the live config.
	got, _ := c.GetFrontend("web")
	if w := got.Pools[0].Backends["be0"].Weight; w != 42 {
		t.Errorf("config weight: got %d, want 42", w)
	}

	// Every invalid lookup or out-of-range value must produce an error.
	bad := []struct {
		fe, pool, be string
		weight       int
		msg          string
	}{
		{"web", "primary", "be0", 101, "expected error for weight 101"},
		{"nope", "primary", "be0", 50, "expected error for unknown frontend"},
		{"web", "nope", "be0", 50, "expected error for unknown pool"},
		{"web", "primary", "nope", 50, "expected error for unknown backend in pool"},
	}
	for _, tc := range bad {
		if _, err := c.SetFrontendPoolBackendWeight(tc.fe, tc.pool, tc.be, tc.weight); err == nil {
			t.Error(tc.msg)
		}
	}
}
func TestEnableDisable(t *testing.T) {
	cfg := makeTestConfig(time.Hour, 3, 2)
	c := New(cfg)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go c.fanOut(ctx)
	// Seed a worker, as EnableBackend/DisableBackend require one in c.workers.
	// Only the cancel func is needed; the derived context itself is unused,
	// so discard it at creation instead of binding and blank-assigning it.
	_, wCancel := context.WithCancel(ctx)
	c.mu.Lock()
	c.runCtx = ctx
	c.workers["be0"] = &worker{
		backend: health.New("be0", net.ParseIP("10.0.0.2"), 2, 3),
		hc:      cfg.HealthChecks["icmp"],
		entry:   cfg.Backends["be0"],
		cancel:  wCancel,
	}
	c.mu.Unlock()
	b, ok := c.DisableBackend("be0")
	if !ok {
		t.Fatal("DisableBackend: not found")
	}
	if b.Health.State != health.StateRemoved {
		t.Errorf("after disable: state=%s, want removed", b.Health.State)
	}
	if b.Config.Enabled {
		t.Error("after disable: Enabled should be false")
	}
	// Backend should still be visible after disable.
	snap, ok := c.GetBackend("be0")
	if !ok {
		t.Fatal("GetBackend after disable: not found")
	}
	if snap.Config.Enabled {
		t.Error("GetBackend after disable: Enabled should be false")
	}
	b, ok = c.EnableBackend("be0")
	if !ok {
		t.Fatal("EnableBackend: not found")
	}
	if b.Health.State != health.StateUnknown {
		t.Errorf("after enable: state=%s, want unknown", b.Health.State)
	}
	if !b.Config.Enabled {
		t.Error("after enable: Enabled should be true")
	}
}
func TestPauseResume(t *testing.T) {
cfg := makeTestConfig(time.Hour, 3, 2)
c := New(cfg)

View File

@@ -156,21 +156,58 @@ type rawFrontend struct {
Pools []rawPool `yaml:"pools"`
}
// ---- Load ------------------------------------------------------------------
// ---- Check / Load ----------------------------------------------------------
// CheckResult holds the outcome of a config file validation. Exactly one of
// ParseError and SemanticError is non-empty when the config is invalid; both
// are empty on success.
type CheckResult struct {
	ParseError    string // YAML could not be read or parsed
	SemanticError string // YAML parsed but semantic validation failed
}

// OK reports whether the config passed validation.
func (r CheckResult) OK() bool {
	if r.ParseError != "" {
		return false
	}
	return r.SemanticError == ""
}
// Check reads and validates the config file at path, returning the parsed
// Config (nil on failure) and a CheckResult that distinguishes YAML parse
// errors from semantic validation errors.
func Check(path string) (*Config, CheckResult) {
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		// Unreadable file is classified as a parse-level failure.
		return nil, CheckResult{ParseError: fmt.Sprintf("read %q: %v", path, readErr)}
	}
	var raw rawConfig
	if yamlErr := yaml.Unmarshal(data, &raw); yamlErr != nil {
		return nil, CheckResult{ParseError: fmt.Sprintf("parse yaml: %v", yamlErr)}
	}
	cfg, convErr := convert(&raw.Maglev)
	if convErr != nil {
		// YAML was well-formed; the content failed semantic validation.
		return nil, CheckResult{SemanticError: convErr.Error()}
	}
	return cfg, CheckResult{}
}
// Load reads and validates the config file at path.
func Load(path string) (*Config, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("read config %q: %w", path, err)
cfg, result := Check(path)
if !result.OK() {
if result.ParseError != "" {
return nil, fmt.Errorf("%s", result.ParseError)
}
return nil, fmt.Errorf("%s", result.SemanticError)
}
return parse(data)
return cfg, nil
}
// parse unmarshals raw YAML bytes and converts them into a validated Config.
// Used by tests; production code goes through Check or Load.
// (This span contained both the old "%w" and new "%v" error lines from the
// diff render; reconstructed to the post-change implementation.)
func parse(data []byte) (*Config, error) {
	var raw rawConfig
	if err := yaml.Unmarshal(data, &raw); err != nil {
		return nil, fmt.Errorf("parse yaml: %v", err)
	}
	return convert(&raw.Maglev)
}

View File

@@ -0,0 +1,148 @@
// Copyright (c) 2026, Pim van Pelt <pim@ipng.ch>
package grpcapi
import (
"context"
"log/slog"
"sync"
)
// logSub is a single WatchEvents subscriber interested in log events.
type logSub struct {
	minLevel slog.Level     // lowest level this subscriber wants to receive
	ch       chan *LogEvent // buffered delivery channel; events are dropped when full
}
// broadcasterState holds the shared subscription registry. It is referenced
// by pointer so that copies returned from WithAttrs/WithGroup share the same set.
type broadcasterState struct {
	mu     sync.Mutex      // guards nextID and subs
	nextID int             // monotonically increasing subscriber id
	subs   map[int]*logSub // active subscribers keyed by id
}
// subscribe registers a new log subscriber with a buffered channel and returns
// the receive side plus a cancel func that unregisters the subscriber and
// closes the channel. The cancel func is safe to call more than once.
func (s *broadcasterState) subscribe(minLevel slog.Level) (<-chan *LogEvent, func()) {
	s.mu.Lock()
	id := s.nextID
	s.nextID++
	sub := &logSub{minLevel: minLevel, ch: make(chan *LogEvent, 256)}
	s.subs[id] = sub
	s.mu.Unlock()
	return sub.ch, func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		// Guard against double-cancel: closing an already-closed channel
		// panics, so only close while the id is still registered.
		if _, ok := s.subs[id]; ok {
			delete(s.subs, id)
			close(sub.ch)
		}
	}
}
// hasSubscriberAt reports whether any subscriber wants records at level or above.
func (s *broadcasterState) hasSubscriberAt(level slog.Level) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, sub := range s.subs {
		if sub.minLevel <= level {
			return true
		}
	}
	return false
}
// fanOut delivers ev to every subscriber whose minimum level admits records at
// level, dropping the event for subscribers whose buffers are full.
func (s *broadcasterState) fanOut(level slog.Level, ev *LogEvent) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, sub := range s.subs {
		if sub.minLevel > level {
			continue
		}
		select {
		case sub.ch <- ev:
		default:
			// Subscriber is not keeping up; drop rather than block the logger.
		}
	}
}
// LogBroadcaster implements slog.Handler. It forwards every record to an
// inner handler (e.g. the JSON stdout handler) and simultaneously fans out
// structured LogEvent messages to all gRPC WatchEvents subscribers.
type LogBroadcaster struct {
	inner    slog.Handler      // wrapped handler that produces the daemon's own log output
	preAttrs []*LogAttr        // pre-resolved attrs from WithAttrs calls
	groupPfx string            // key prefix accumulated by WithGroup calls
	shared   *broadcasterState // subscription registry shared across handler copies
}
// NewLogBroadcaster wraps inner and returns a LogBroadcaster ready for use
// as the process-wide default slog handler.
func NewLogBroadcaster(inner slog.Handler) *LogBroadcaster {
	state := &broadcasterState{subs: make(map[int]*logSub)}
	return &LogBroadcaster{inner: inner, shared: state}
}
// Subscribe registers a subscriber that receives LogEvents at or above
// minLevel. The returned channel is closed when the cancel func is called.
// Subscriptions live in the shared state, so they are visible to every
// WithAttrs/WithGroup copy of this handler.
func (b *LogBroadcaster) Subscribe(minLevel slog.Level) (<-chan *LogEvent, func()) {
	return b.shared.subscribe(minLevel)
}
// Enabled implements slog.Handler. A record is enabled when the wrapped
// handler would accept it, or when at least one gRPC subscriber has asked for
// records at this level. This allows a WatchEvents client requesting debug
// log events to receive them even when maglevd's own -log-level is stricter.
func (b *LogBroadcaster) Enabled(ctx context.Context, level slog.Level) bool {
	if b.inner.Enabled(ctx, level) {
		return true
	}
	return b.shared.hasSubscriberAt(level)
}
// Handle implements slog.Handler. It forwards the record to the inner handler
// only when the inner handler is enabled for that level (avoiding duplicate or
// unwanted stdout output), then fans it out to all interested gRPC subscribers.
func (b *LogBroadcaster) Handle(ctx context.Context, r slog.Record) error {
	var err error
	if b.inner.Enabled(ctx, r.Level) {
		err = b.inner.Handle(ctx, r)
	}
	// Skip building the LogEvent entirely when nobody is listening at this
	// level — keeps the zero-subscriber hot path allocation-free. A subscriber
	// racing in between this check and fanOut may miss this one record, which
	// matches the broadcast's existing best-effort (drop-on-full) semantics.
	if !b.shared.hasSubscriberAt(r.Level) {
		return err
	}
	attrs := make([]*LogAttr, 0, len(b.preAttrs)+r.NumAttrs())
	attrs = append(attrs, b.preAttrs...)
	r.Attrs(func(a slog.Attr) bool {
		attrs = append(attrs, &LogAttr{Key: b.groupPfx + a.Key, Value: a.Value.String()})
		return true
	})
	ev := &LogEvent{
		AtUnixNs: r.Time.UnixNano(),
		Level:    r.Level.String(),
		Msg:      r.Message,
		Attrs:    attrs,
	}
	b.shared.fanOut(r.Level, ev)
	return err
}
// WithAttrs implements slog.Handler. Attrs are pre-resolved into LogAttr form
// once here, so Handle does not redo the work per record. Per the slog.Handler
// contract a handler may return the receiver for an empty attr set, which
// avoids an unnecessary copy.
func (b *LogBroadcaster) WithAttrs(attrs []slog.Attr) slog.Handler {
	if len(attrs) == 0 {
		return b
	}
	pre := make([]*LogAttr, len(b.preAttrs), len(b.preAttrs)+len(attrs))
	copy(pre, b.preAttrs)
	for _, a := range attrs {
		pre = append(pre, &LogAttr{Key: b.groupPfx + a.Key, Value: a.Value.String()})
	}
	return &LogBroadcaster{
		inner:    b.inner.WithAttrs(attrs),
		preAttrs: pre,
		groupPfx: b.groupPfx,
		shared:   b.shared,
	}
}
// WithGroup implements slog.Handler. Group names accumulate as a dotted key
// prefix applied to subsequently added attrs. Per the slog.Handler contract,
// an empty group name must return the receiver unchanged (the previous code
// appended a bare "." prefix in that case).
func (b *LogBroadcaster) WithGroup(name string) slog.Handler {
	if name == "" {
		return b
	}
	return &LogBroadcaster{
		inner:    b.inner.WithGroup(name),
		preAttrs: b.preAttrs,
		groupPfx: b.groupPfx + name + ".",
		shared:   b.shared,
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -19,15 +19,19 @@ import (
const _ = grpc.SupportPackageIsVersion9
// Full method names for the Maglev service, used by client stubs, handlers and
// interceptors. (The diff render interleaved the pre- and post-rename constant
// sets, which would be duplicate declarations; reconstructed to the final set.)
const (
	Maglev_ListFrontends_FullMethodName                = "/maglev.Maglev/ListFrontends"
	Maglev_GetFrontend_FullMethodName                  = "/maglev.Maglev/GetFrontend"
	Maglev_ListBackends_FullMethodName                 = "/maglev.Maglev/ListBackends"
	Maglev_GetBackend_FullMethodName                   = "/maglev.Maglev/GetBackend"
	Maglev_PauseBackend_FullMethodName                 = "/maglev.Maglev/PauseBackend"
	Maglev_ResumeBackend_FullMethodName                = "/maglev.Maglev/ResumeBackend"
	Maglev_EnableBackend_FullMethodName                = "/maglev.Maglev/EnableBackend"
	Maglev_DisableBackend_FullMethodName               = "/maglev.Maglev/DisableBackend"
	Maglev_ListHealthChecks_FullMethodName             = "/maglev.Maglev/ListHealthChecks"
	Maglev_GetHealthCheck_FullMethodName               = "/maglev.Maglev/GetHealthCheck"
	Maglev_SetFrontendPoolBackendWeight_FullMethodName = "/maglev.Maglev/SetFrontendPoolBackendWeight"
	Maglev_WatchEvents_FullMethodName                  = "/maglev.Maglev/WatchEvents"
	Maglev_CheckConfig_FullMethodName                  = "/maglev.Maglev/CheckConfig"
)
// MaglevClient is the client API for Maglev service.
@@ -40,11 +44,15 @@ type MaglevClient interface {
GetFrontend(ctx context.Context, in *GetFrontendRequest, opts ...grpc.CallOption) (*FrontendInfo, error)
ListBackends(ctx context.Context, in *ListBackendsRequest, opts ...grpc.CallOption) (*ListBackendsResponse, error)
GetBackend(ctx context.Context, in *GetBackendRequest, opts ...grpc.CallOption) (*BackendInfo, error)
PauseBackend(ctx context.Context, in *PauseResumeRequest, opts ...grpc.CallOption) (*BackendInfo, error)
ResumeBackend(ctx context.Context, in *PauseResumeRequest, opts ...grpc.CallOption) (*BackendInfo, error)
PauseBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error)
ResumeBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error)
EnableBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error)
DisableBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error)
ListHealthChecks(ctx context.Context, in *ListHealthChecksRequest, opts ...grpc.CallOption) (*ListHealthChecksResponse, error)
GetHealthCheck(ctx context.Context, in *GetHealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckInfo, error)
WatchBackendEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[BackendEvent], error)
SetFrontendPoolBackendWeight(ctx context.Context, in *SetWeightRequest, opts ...grpc.CallOption) (*FrontendInfo, error)
WatchEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Event], error)
CheckConfig(ctx context.Context, in *CheckConfigRequest, opts ...grpc.CallOption) (*CheckConfigResponse, error)
}
type maglevClient struct {
@@ -95,7 +103,7 @@ func (c *maglevClient) GetBackend(ctx context.Context, in *GetBackendRequest, op
return out, nil
}
func (c *maglevClient) PauseBackend(ctx context.Context, in *PauseResumeRequest, opts ...grpc.CallOption) (*BackendInfo, error) {
func (c *maglevClient) PauseBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BackendInfo)
err := c.cc.Invoke(ctx, Maglev_PauseBackend_FullMethodName, in, out, cOpts...)
@@ -105,7 +113,7 @@ func (c *maglevClient) PauseBackend(ctx context.Context, in *PauseResumeRequest,
return out, nil
}
func (c *maglevClient) ResumeBackend(ctx context.Context, in *PauseResumeRequest, opts ...grpc.CallOption) (*BackendInfo, error) {
func (c *maglevClient) ResumeBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BackendInfo)
err := c.cc.Invoke(ctx, Maglev_ResumeBackend_FullMethodName, in, out, cOpts...)
@@ -115,6 +123,26 @@ func (c *maglevClient) ResumeBackend(ctx context.Context, in *PauseResumeRequest
return out, nil
}
// EnableBackend invokes the unary Maglev.EnableBackend RPC.
func (c *maglevClient) EnableBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(BackendInfo)
	err := c.cc.Invoke(ctx, Maglev_EnableBackend_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// DisableBackend invokes the unary Maglev.DisableBackend RPC.
func (c *maglevClient) DisableBackend(ctx context.Context, in *BackendRequest, opts ...grpc.CallOption) (*BackendInfo, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(BackendInfo)
	err := c.cc.Invoke(ctx, Maglev_DisableBackend_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
func (c *maglevClient) ListHealthChecks(ctx context.Context, in *ListHealthChecksRequest, opts ...grpc.CallOption) (*ListHealthChecksResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListHealthChecksResponse)
@@ -135,13 +163,23 @@ func (c *maglevClient) GetHealthCheck(ctx context.Context, in *GetHealthCheckReq
return out, nil
}
func (c *maglevClient) WatchBackendEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[BackendEvent], error) {
func (c *maglevClient) SetFrontendPoolBackendWeight(ctx context.Context, in *SetWeightRequest, opts ...grpc.CallOption) (*FrontendInfo, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Maglev_ServiceDesc.Streams[0], Maglev_WatchBackendEvents_FullMethodName, cOpts...)
out := new(FrontendInfo)
err := c.cc.Invoke(ctx, Maglev_SetFrontendPoolBackendWeight_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[WatchRequest, BackendEvent]{ClientStream: stream}
return out, nil
}
func (c *maglevClient) WatchEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Event], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Maglev_ServiceDesc.Streams[0], Maglev_WatchEvents_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[WatchRequest, Event]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -152,7 +190,17 @@ func (c *maglevClient) WatchBackendEvents(ctx context.Context, in *WatchRequest,
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Maglev_WatchBackendEventsClient = grpc.ServerStreamingClient[BackendEvent]
type Maglev_WatchEventsClient = grpc.ServerStreamingClient[Event]
// CheckConfig invokes the unary Maglev.CheckConfig RPC.
func (c *maglevClient) CheckConfig(ctx context.Context, in *CheckConfigRequest, opts ...grpc.CallOption) (*CheckConfigResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(CheckConfigResponse)
	err := c.cc.Invoke(ctx, Maglev_CheckConfig_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// MaglevServer is the server API for Maglev service.
// All implementations must embed UnimplementedMaglevServer
@@ -164,11 +212,15 @@ type MaglevServer interface {
GetFrontend(context.Context, *GetFrontendRequest) (*FrontendInfo, error)
ListBackends(context.Context, *ListBackendsRequest) (*ListBackendsResponse, error)
GetBackend(context.Context, *GetBackendRequest) (*BackendInfo, error)
PauseBackend(context.Context, *PauseResumeRequest) (*BackendInfo, error)
ResumeBackend(context.Context, *PauseResumeRequest) (*BackendInfo, error)
PauseBackend(context.Context, *BackendRequest) (*BackendInfo, error)
ResumeBackend(context.Context, *BackendRequest) (*BackendInfo, error)
EnableBackend(context.Context, *BackendRequest) (*BackendInfo, error)
DisableBackend(context.Context, *BackendRequest) (*BackendInfo, error)
ListHealthChecks(context.Context, *ListHealthChecksRequest) (*ListHealthChecksResponse, error)
GetHealthCheck(context.Context, *GetHealthCheckRequest) (*HealthCheckInfo, error)
WatchBackendEvents(*WatchRequest, grpc.ServerStreamingServer[BackendEvent]) error
SetFrontendPoolBackendWeight(context.Context, *SetWeightRequest) (*FrontendInfo, error)
WatchEvents(*WatchRequest, grpc.ServerStreamingServer[Event]) error
CheckConfig(context.Context, *CheckConfigRequest) (*CheckConfigResponse, error)
mustEmbedUnimplementedMaglevServer()
}
@@ -191,20 +243,32 @@ func (UnimplementedMaglevServer) ListBackends(context.Context, *ListBackendsRequ
func (UnimplementedMaglevServer) GetBackend(context.Context, *GetBackendRequest) (*BackendInfo, error) {
return nil, status.Error(codes.Unimplemented, "method GetBackend not implemented")
}
func (UnimplementedMaglevServer) PauseBackend(context.Context, *PauseResumeRequest) (*BackendInfo, error) {
func (UnimplementedMaglevServer) PauseBackend(context.Context, *BackendRequest) (*BackendInfo, error) {
return nil, status.Error(codes.Unimplemented, "method PauseBackend not implemented")
}
func (UnimplementedMaglevServer) ResumeBackend(context.Context, *PauseResumeRequest) (*BackendInfo, error) {
func (UnimplementedMaglevServer) ResumeBackend(context.Context, *BackendRequest) (*BackendInfo, error) {
return nil, status.Error(codes.Unimplemented, "method ResumeBackend not implemented")
}
func (UnimplementedMaglevServer) EnableBackend(context.Context, *BackendRequest) (*BackendInfo, error) {
return nil, status.Error(codes.Unimplemented, "method EnableBackend not implemented")
}
func (UnimplementedMaglevServer) DisableBackend(context.Context, *BackendRequest) (*BackendInfo, error) {
return nil, status.Error(codes.Unimplemented, "method DisableBackend not implemented")
}
func (UnimplementedMaglevServer) ListHealthChecks(context.Context, *ListHealthChecksRequest) (*ListHealthChecksResponse, error) {
return nil, status.Error(codes.Unimplemented, "method ListHealthChecks not implemented")
}
func (UnimplementedMaglevServer) GetHealthCheck(context.Context, *GetHealthCheckRequest) (*HealthCheckInfo, error) {
return nil, status.Error(codes.Unimplemented, "method GetHealthCheck not implemented")
}
func (UnimplementedMaglevServer) WatchBackendEvents(*WatchRequest, grpc.ServerStreamingServer[BackendEvent]) error {
return status.Error(codes.Unimplemented, "method WatchBackendEvents not implemented")
func (UnimplementedMaglevServer) SetFrontendPoolBackendWeight(context.Context, *SetWeightRequest) (*FrontendInfo, error) {
return nil, status.Error(codes.Unimplemented, "method SetFrontendPoolBackendWeight not implemented")
}
func (UnimplementedMaglevServer) WatchEvents(*WatchRequest, grpc.ServerStreamingServer[Event]) error {
return status.Error(codes.Unimplemented, "method WatchEvents not implemented")
}
func (UnimplementedMaglevServer) CheckConfig(context.Context, *CheckConfigRequest) (*CheckConfigResponse, error) {
return nil, status.Error(codes.Unimplemented, "method CheckConfig not implemented")
}
func (UnimplementedMaglevServer) mustEmbedUnimplementedMaglevServer() {}
func (UnimplementedMaglevServer) testEmbeddedByValue() {}
@@ -300,7 +364,7 @@ func _Maglev_GetBackend_Handler(srv interface{}, ctx context.Context, dec func(i
}
func _Maglev_PauseBackend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PauseResumeRequest)
in := new(BackendRequest)
if err := dec(in); err != nil {
return nil, err
}
@@ -312,13 +376,13 @@ func _Maglev_PauseBackend_Handler(srv interface{}, ctx context.Context, dec func
FullMethod: Maglev_PauseBackend_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MaglevServer).PauseBackend(ctx, req.(*PauseResumeRequest))
return srv.(MaglevServer).PauseBackend(ctx, req.(*BackendRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Maglev_ResumeBackend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PauseResumeRequest)
in := new(BackendRequest)
if err := dec(in); err != nil {
return nil, err
}
@@ -330,7 +394,43 @@ func _Maglev_ResumeBackend_Handler(srv interface{}, ctx context.Context, dec fun
FullMethod: Maglev_ResumeBackend_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MaglevServer).ResumeBackend(ctx, req.(*PauseResumeRequest))
return srv.(MaglevServer).ResumeBackend(ctx, req.(*BackendRequest))
}
return interceptor(ctx, in, info, handler)
}
// _Maglev_EnableBackend_Handler decodes the request, honors any configured
// unary interceptor, and dispatches to the server's EnableBackend method.
func _Maglev_EnableBackend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(BackendRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MaglevServer).EnableBackend(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Maglev_EnableBackend_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MaglevServer).EnableBackend(ctx, req.(*BackendRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Maglev_DisableBackend_Handler decodes the request, honors any configured
// unary interceptor, and dispatches to the server's DisableBackend method.
func _Maglev_DisableBackend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(BackendRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MaglevServer).DisableBackend(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Maglev_DisableBackend_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MaglevServer).DisableBackend(ctx, req.(*BackendRequest))
	}
	return interceptor(ctx, in, info, handler)
}
@@ -371,16 +471,52 @@ func _Maglev_GetHealthCheck_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
func _Maglev_WatchBackendEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
// _Maglev_SetFrontendPoolBackendWeight_Handler decodes the request, honors any
// configured unary interceptor, and dispatches to the server's
// SetFrontendPoolBackendWeight method.
func _Maglev_SetFrontendPoolBackendWeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SetWeightRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MaglevServer).SetFrontendPoolBackendWeight(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Maglev_SetFrontendPoolBackendWeight_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MaglevServer).SetFrontendPoolBackendWeight(ctx, req.(*SetWeightRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Maglev_WatchEvents_Handler receives the initial WatchRequest from the
// stream and hands the server-streaming Event stream to the MaglevServer
// implementation.
func _Maglev_WatchEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(WatchRequest)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(MaglevServer).WatchEvents(m, &grpc.GenericServerStream[WatchRequest, Event]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Maglev_WatchBackendEventsServer = grpc.ServerStreamingServer[BackendEvent]
type Maglev_WatchEventsServer = grpc.ServerStreamingServer[Event]
// _Maglev_CheckConfig_Handler decodes the request, honors any configured
// unary interceptor, and dispatches to the server's CheckConfig method.
func _Maglev_CheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CheckConfigRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(MaglevServer).CheckConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Maglev_CheckConfig_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(MaglevServer).CheckConfig(ctx, req.(*CheckConfigRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// Maglev_ServiceDesc is the grpc.ServiceDesc for Maglev service.
// It's only intended for direct use with grpc.RegisterService,
@@ -413,6 +549,14 @@ var Maglev_ServiceDesc = grpc.ServiceDesc{
MethodName: "ResumeBackend",
Handler: _Maglev_ResumeBackend_Handler,
},
{
MethodName: "EnableBackend",
Handler: _Maglev_EnableBackend_Handler,
},
{
MethodName: "DisableBackend",
Handler: _Maglev_DisableBackend_Handler,
},
{
MethodName: "ListHealthChecks",
Handler: _Maglev_ListHealthChecks_Handler,
@@ -421,11 +565,19 @@ var Maglev_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetHealthCheck",
Handler: _Maglev_GetHealthCheck_Handler,
},
{
MethodName: "SetFrontendPoolBackendWeight",
Handler: _Maglev_SetFrontendPoolBackendWeight_Handler,
},
{
MethodName: "CheckConfig",
Handler: _Maglev_CheckConfig_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "WatchBackendEvents",
Handler: _Maglev_WatchBackendEvents_Handler,
StreamName: "WatchEvents",
Handler: _Maglev_WatchEvents_Handler,
ServerStreams: true,
},
},

View File

@@ -4,6 +4,7 @@ package grpcapi
import (
"context"
"log/slog"
"net"
"google.golang.org/grpc/codes"
@@ -17,15 +18,20 @@ import (
// Server implements the MaglevServer gRPC interface.
//
// Fix: the struct carried duplicate ctx/checker fields (pre- and post-rename
// diff lines both present), which does not compile; only the current field
// set is kept.
type Server struct {
	UnimplementedMaglevServer

	ctx        context.Context  // streaming-RPC lifetime; cancelled on shutdown
	checker    *checker.Checker // backend/frontend state and event source
	logs       *LogBroadcaster  // log-event source for WatchEvents; may be nil
	configPath string           // configuration file path validated by CheckConfig
}
// NewServer creates a Server backed by the given Checker. The provided context
// controls the lifetime of streaming RPCs: cancelling it closes all active
// WatchBackendEvents streams so that grpc.Server.GracefulStop can complete.
func NewServer(ctx context.Context, c *checker.Checker) *Server {
return &Server{ctx: ctx, checker: c}
// NewServer creates a Server backed by the given Checker. logs may be nil, in
// which case log events are never sent to WatchEvents streams. configPath is
// used by CheckConfig to reload and validate the configuration file on demand.
// The provided context controls the lifetime of streaming RPCs: cancelling it
// closes all active WatchEvents streams so that grpc.Server.GracefulStop can
// complete.
func NewServer(ctx context.Context, c *checker.Checker, logs *LogBroadcaster, configPath string) *Server {
return &Server{ctx: ctx, checker: c, logs: logs, configPath: configPath}
}
// ListFrontends returns the names of all configured frontends.
@@ -57,7 +63,7 @@ func (s *Server) GetBackend(_ context.Context, req *GetBackendRequest) (*Backend
}
// PauseBackend pauses health checking for a backend by name.
func (s *Server) PauseBackend(_ context.Context, req *PauseResumeRequest) (*BackendInfo, error) {
func (s *Server) PauseBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
b, ok := s.checker.PauseBackend(req.Name)
if !ok {
return nil, status.Errorf(codes.NotFound, "backend %q not found", req.Name)
@@ -66,7 +72,7 @@ func (s *Server) PauseBackend(_ context.Context, req *PauseResumeRequest) (*Back
}
// ResumeBackend resumes health checking for a backend by name.
func (s *Server) ResumeBackend(_ context.Context, req *PauseResumeRequest) (*BackendInfo, error) {
func (s *Server) ResumeBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
b, ok := s.checker.ResumeBackend(req.Name)
if !ok {
return nil, status.Errorf(codes.NotFound, "backend %q not found", req.Name)
@@ -74,6 +80,36 @@ func (s *Server) ResumeBackend(_ context.Context, req *PauseResumeRequest) (*Bac
return backendToProto(b), nil
}
// EnableBackend re-enables a previously disabled backend. It returns
// codes.NotFound when no backend with the requested name exists.
func (s *Server) EnableBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
	if b, ok := s.checker.EnableBackend(req.Name); ok {
		return backendToProto(b), nil
	}
	return nil, status.Errorf(codes.NotFound, "backend %q not found", req.Name)
}
// DisableBackend disables a backend, stopping its probe goroutine. It returns
// codes.NotFound when no backend with the requested name exists.
func (s *Server) DisableBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
	if b, ok := s.checker.DisableBackend(req.Name); ok {
		return backendToProto(b), nil
	}
	return nil, status.Errorf(codes.NotFound, "backend %q not found", req.Name)
}
// SetFrontendPoolBackendWeight updates the weight of a backend in a pool.
// Weights outside [0, 100] are rejected with codes.InvalidArgument; an
// unknown frontend/pool/backend combination surfaces as codes.NotFound.
func (s *Server) SetFrontendPoolBackendWeight(_ context.Context, req *SetWeightRequest) (*FrontendInfo, error) {
	w := int(req.Weight)
	if w < 0 || w > 100 {
		return nil, status.Errorf(codes.InvalidArgument, "weight %d out of range [0, 100]", req.Weight)
	}
	fe, err := s.checker.SetFrontendPoolBackendWeight(req.Frontend, req.Pool, req.Backend, w)
	if err != nil {
		return nil, status.Errorf(codes.NotFound, "%v", err)
	}
	return frontendToProto(req.Frontend, fe), nil
}
// ListHealthChecks returns the names of all configured health checks.
func (s *Server) ListHealthChecks(_ context.Context, _ *ListHealthChecksRequest) (*ListHealthChecksResponse, error) {
return &ListHealthChecksResponse{Names: s.checker.ListHealthChecks()}, nil
@@ -88,30 +124,55 @@ func (s *Server) GetHealthCheck(_ context.Context, req *GetHealthCheckRequest) (
return healthCheckToProto(req.Name, hc), nil
}
// WatchBackendEvents streams the current state of all backends on connect, then
// streams live state transitions until the client disconnects.
func (s *Server) WatchBackendEvents(_ *WatchRequest, stream Maglev_WatchBackendEventsServer) error {
// Send current state of all backends as synthetic events.
for _, name := range s.checker.ListBackends() {
snap, ok := s.checker.GetBackend(name)
if !ok {
continue
}
ev := &BackendEvent{
BackendName: name,
Transition: &TransitionRecord{
From: snap.Health.State.String(),
To: snap.Health.State.String(),
AtUnixNs: 0,
},
}
if err := stream.Send(ev); err != nil {
return err
// WatchEvents streams events to the client. On connect, the current state of
// all backends is sent as synthetic BackendEvents. Afterwards, live events are
// forwarded based on the filter flags in req. An unset (nil) flag defaults to
// true (subscribe). An empty log_level defaults to "info".
func (s *Server) WatchEvents(req *WatchRequest, stream Maglev_WatchEventsServer) error {
wantLog := req.Log == nil || *req.Log
wantBackend := req.Backend == nil || *req.Backend
wantFrontend := req.Frontend == nil || *req.Frontend
_ = wantFrontend // no frontend events emitted yet
logLevel := slog.LevelInfo
if req.LogLevel != "" {
if err := logLevel.UnmarshalText([]byte(req.LogLevel)); err != nil {
return status.Errorf(codes.InvalidArgument, "invalid log_level %q: must be debug, info, warn, or error", req.LogLevel)
}
}
ch, unsub := s.checker.Subscribe()
defer unsub()
// Subscribe to log events (nil channel blocks forever when not wanted).
var logCh <-chan *LogEvent
if wantLog && s.logs != nil {
var unsub func()
logCh, unsub = s.logs.Subscribe(logLevel)
defer unsub()
}
// Subscribe to backend events; send initial state snapshot first.
var backendCh <-chan checker.Event
if wantBackend {
for _, name := range s.checker.ListBackends() {
snap, ok := s.checker.GetBackend(name)
if !ok {
continue
}
ev := &Event{Event: &Event_Backend{Backend: &BackendEvent{
BackendName: name,
Transition: &TransitionRecord{
From: snap.Health.State.String(),
To: snap.Health.State.String(),
AtUnixNs: 0,
},
}}}
if err := stream.Send(ev); err != nil {
return err
}
}
var unsub func()
backendCh, unsub = s.checker.Subscribe()
defer unsub()
}
for {
select {
@@ -119,21 +180,38 @@ func (s *Server) WatchBackendEvents(_ *WatchRequest, stream Maglev_WatchBackendE
return status.Error(codes.Unavailable, "server shutting down")
case <-stream.Context().Done():
return nil
case e, ok := <-ch:
case le, ok := <-logCh:
if !ok {
return nil
}
ev := &BackendEvent{
if err := stream.Send(&Event{Event: &Event_Log{Log: le}}); err != nil {
return err
}
case e, ok := <-backendCh:
if !ok {
return nil
}
if err := stream.Send(&Event{Event: &Event_Backend{Backend: &BackendEvent{
BackendName: e.BackendName,
Transition: transitionToProto(e.Transition),
}
if err := stream.Send(ev); err != nil {
}}}); err != nil {
return err
}
}
}
}
// CheckConfig reads and validates the configuration file, returning a
// structured result that distinguishes YAML parse errors from semantic errors.
func (s *Server) CheckConfig(_ context.Context, _ *CheckConfigRequest) (*CheckConfigResponse, error) {
	_, result := config.Check(s.configPath)
	resp := &CheckConfigResponse{
		Ok:            result.OK(),
		ParseError:    result.ParseError,
		SemanticError: result.SemanticError,
	}
	return resp, nil
}
// ---- conversion helpers ----------------------------------------------------
func frontendToProto(name string, fe config.Frontend) *FrontendInfo {

View File

@@ -62,7 +62,7 @@ func startTestServer(t *testing.T, ctx context.Context, c *checker.Checker) (Mag
t.Fatalf("listen: %v", err)
}
srv := grpc.NewServer()
RegisterMaglevServer(srv, NewServer(ctx, c))
RegisterMaglevServer(srv, NewServer(ctx, c, nil, ""))
go srv.Serve(lis) //nolint:errcheck
conn, err := grpc.NewClient(lis.Addr().String(),
@@ -198,7 +198,7 @@ func TestPauseResumeBackend(t *testing.T) {
client, cleanup := startTestServer(t, ctx, c)
defer cleanup()
info, err := client.PauseBackend(ctx, &PauseResumeRequest{Name: "be0"})
info, err := client.PauseBackend(ctx, &BackendRequest{Name: "be0"})
if err != nil {
t.Fatalf("PauseBackend: %v", err)
}
@@ -206,7 +206,7 @@ func TestPauseResumeBackend(t *testing.T) {
t.Errorf("after pause: got %q, want paused", info.State)
}
info, err = client.ResumeBackend(ctx, &PauseResumeRequest{Name: "be0"})
info, err = client.ResumeBackend(ctx, &BackendRequest{Name: "be0"})
if err != nil {
t.Fatalf("ResumeBackend: %v", err)
}
@@ -215,6 +215,78 @@ func TestPauseResumeBackend(t *testing.T) {
}
}
// TestSetFrontendPoolBackendWeight checks the happy path (weight reflected in
// the response) and that out-of-range weights and unknown frontends are
// rejected.
func TestSetFrontendPoolBackendWeight(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, c)
	defer cleanup()

	// Happy path: the new weight must appear in the returned frontend info.
	info, err := client.SetFrontendPoolBackendWeight(ctx, &SetWeightRequest{
		Frontend: "web",
		Pool:     "primary",
		Backend:  "be0",
		Weight:   42,
	})
	if err != nil {
		t.Fatalf("SetFrontendPoolBackendWeight: %v", err)
	}
	if len(info.Pools) == 0 || len(info.Pools[0].Backends) == 0 {
		t.Fatal("response missing pools/backends")
	}
	if got := info.Pools[0].Backends[0].Weight; got != 42 {
		t.Errorf("weight: got %d, want 42", got)
	}

	// Requests that the server must reject.
	for _, tc := range []struct {
		desc string
		req  *SetWeightRequest
	}{
		{"weight 101", &SetWeightRequest{Frontend: "web", Pool: "primary", Backend: "be0", Weight: 101}},
		{"unknown frontend", &SetWeightRequest{Frontend: "nope", Pool: "primary", Backend: "be0", Weight: 50}},
	} {
		if _, err := client.SetFrontendPoolBackendWeight(ctx, tc.req); err == nil {
			t.Errorf("expected error for %s", tc.desc)
		}
	}
}
// TestEnableDisableBackend verifies the disable/enable round-trip: disabling
// moves the backend to state "removed" and clears Enabled; enabling restores
// state "unknown" and sets Enabled again.
func TestEnableDisableBackend(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, c)
	defer cleanup()

	// Disable first and check the resulting snapshot.
	resp, err := client.DisableBackend(ctx, &BackendRequest{Name: "be0"})
	if err != nil {
		t.Fatalf("DisableBackend: %v", err)
	}
	if resp.State != "removed" {
		t.Errorf("after disable: got %q, want removed", resp.State)
	}
	if resp.Enabled {
		t.Error("after disable: Enabled should be false")
	}

	// Re-enable and confirm the backend is back to the unknown state.
	resp, err = client.EnableBackend(ctx, &BackendRequest{Name: "be0"})
	if err != nil {
		t.Fatalf("EnableBackend: %v", err)
	}
	if resp.State != "unknown" {
		t.Errorf("after enable: got %q, want unknown", resp.State)
	}
	if !resp.Enabled {
		t.Error("after enable: Enabled should be true")
	}
}
func TestListHealthChecks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -266,7 +338,7 @@ func TestGetHealthCheckNotFound(t *testing.T) {
}
}
func TestWatchBackendEventsServerShutdown(t *testing.T) {
func TestWatchEventsServerShutdown(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -277,11 +349,11 @@ func TestWatchBackendEventsServerShutdown(t *testing.T) {
client, cleanup := startTestServer(t, srvCtx, c)
defer cleanup()
stream, err := client.WatchBackendEvents(ctx, &WatchRequest{})
stream, err := client.WatchEvents(ctx, &WatchRequest{})
if err != nil {
t.Fatalf("WatchBackendEvents: %v", err)
t.Fatalf("WatchEvents: %v", err)
}
// Drain the initial synthetic event.
// Drain the initial synthetic backend event.
if _, err := stream.Recv(); err != nil {
t.Fatalf("initial Recv: %v", err)
}
@@ -294,7 +366,7 @@ func TestWatchBackendEventsServerShutdown(t *testing.T) {
}
}
func TestWatchBackendEvents(t *testing.T) {
func TestWatchEventsBackend(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -302,17 +374,72 @@ func TestWatchBackendEvents(t *testing.T) {
client, cleanup := startTestServer(t, ctx, c)
defer cleanup()
stream, err := client.WatchBackendEvents(ctx, &WatchRequest{})
stream, err := client.WatchEvents(ctx, &WatchRequest{})
if err != nil {
t.Fatalf("WatchBackendEvents: %v", err)
t.Fatalf("WatchEvents: %v", err)
}
// Should receive the current state for be0 immediately.
// Should receive the current state for be0 immediately as a BackendEvent.
ev, err := stream.Recv()
if err != nil {
t.Fatalf("Recv: %v", err)
}
if ev.BackendName != "be0" {
t.Errorf("initial event: backend=%q, want be0", ev.BackendName)
be, ok := ev.Event.(*Event_Backend)
if !ok {
t.Fatalf("expected BackendEvent, got %T", ev.Event)
}
if be.Backend.BackendName != "be0" {
t.Errorf("initial event: backend=%q, want be0", be.Backend.BackendName)
}
}
// TestWatchEventsLogOnly verifies that with backend and frontend filters
// disabled no initial backend snapshot is delivered; anything that does
// arrive must be a LogEvent.
//
// Fix: the receive goroutine discarded the Recv error and then dereferenced
// the (possibly nil) *Event in the select branch, which would panic the test
// instead of failing it if the stream errored early. The error is now carried
// alongside the event and checked before use.
func TestWatchEventsLogOnly(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, c)
	defer cleanup()

	f := false
	stream, err := client.WatchEvents(ctx, &WatchRequest{Backend: &f, Frontend: &f})
	if err != nil {
		t.Fatalf("WatchEvents: %v", err)
	}

	// No initial snapshot should arrive (backend disabled). Verify by checking
	// that the stream has no immediately-readable event.
	type recvResult struct {
		ev  *Event
		err error
	}
	recvCh := make(chan recvResult, 1)
	go func() {
		ev, err := stream.Recv()
		recvCh <- recvResult{ev: ev, err: err}
	}()

	select {
	case r := <-recvCh:
		if r.err != nil {
			t.Fatalf("Recv: %v", r.err)
		}
		if _, isLog := r.ev.Event.(*Event_Log); !isLog {
			t.Errorf("expected only LogEvents, got %T", r.ev.Event)
		}
	case <-time.After(50 * time.Millisecond):
		// expected: no backend snapshot arrived
	}
}
// TestWatchEventsInvalidLogLevel checks that an unknown log_level is rejected.
// For streaming RPCs the server error is surfaced on the first Recv rather
// than on the initial WatchEvents call.
func TestWatchEventsInvalidLogLevel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c := makeTestChecker(ctx)
	client, cleanup := startTestServer(t, ctx, c)
	defer cleanup()

	stream, err := client.WatchEvents(ctx, &WatchRequest{LogLevel: "verbose"})
	if err != nil {
		t.Fatalf("WatchEvents: %v", err)
	}
	if _, err := stream.Recv(); err == nil {
		t.Fatal("expected error for invalid log_level")
	}
}