When maglevd rehashes its config:

- when a backend gets newly added or restarted, an event should fire; perhaps transition to 'unknown'
- when a backend gets removed, an event should fire; perhaps transition to 'removed'
- when a backend is in the 'unknown' state, the fast-interval is appropriate
This commit is contained in:
2026-04-11 02:16:08 +02:00
parent 530d85740e
commit 7ad183320c
3 changed files with 88 additions and 21 deletions

View File

@@ -67,10 +67,11 @@ func (c *Checker) Run(ctx context.Context) error {
c.mu.Lock()
names := activeBackendNames(c.cfg)
maxHistory := c.cfg.HealthChecker.TransitionHistory
for i, name := range names {
b := c.cfg.Backends[name]
hc := c.cfg.HealthChecks[b.HealthCheck]
c.startWorker(ctx, name, b, hc, i, len(names))
c.startWorker(ctx, name, b, hc, i, len(names), maxHistory)
}
c.mu.Unlock()
@@ -86,21 +87,25 @@ func (c *Checker) Reload(ctx context.Context, cfg *config.Config) error {
c.mu.Lock()
defer c.mu.Unlock()
maxHistory := cfg.HealthChecker.TransitionHistory
desired := map[string]struct{}{}
for _, name := range activeBackendNames(cfg) {
desired[name] = struct{}{}
}
// Stop workers no longer needed.
// Stop workers no longer needed; emit a removed event using the old frontends.
for name, w := range c.workers {
if _, ok := desired[name]; !ok {
slog.Info("backend-stop", "backend", name)
t := w.backend.Remove(maxHistory)
c.emitForBackend(name, w.backend.Address, t, c.cfg.Frontends)
w.cancel()
delete(c.workers, name)
}
}
// Add new or restart changed workers.
// Add new or restart changed workers; emit an unknown event using the new frontends.
names := activeBackendNames(cfg)
for i, name := range names {
b := cfg.Backends[name]
@@ -113,11 +118,12 @@ func (c *Checker) Reload(ctx context.Context, cfg *config.Config) error {
}
slog.Info("backend-restart", "backend", name)
w.cancel()
c.startWorker(ctx, name, b, hc, i, len(names))
c.startWorker(ctx, name, b, hc, i, len(names), maxHistory)
} else {
slog.Info("backend-start", "backend", name)
c.startWorker(ctx, name, b, hc, i, len(names))
c.startWorker(ctx, name, b, hc, i, len(names), maxHistory)
}
c.emitForBackend(name, c.workers[name].backend.Address, c.workers[name].backend.Transitions[0], cfg.Frontends)
}
c.cfg = cfg
@@ -231,7 +237,7 @@ func (c *Checker) PauseBackend(name string) (BackendSnapshot, bool) {
maxHistory := c.cfg.HealthChecker.TransitionHistory
if w.backend.Pause(maxHistory) {
slog.Info("backend-pause", "backend", name)
c.emitForBackend(name, w.backend.Address, w.backend.Transitions[0])
c.emitForBackend(name, w.backend.Address, w.backend.Transitions[0], c.cfg.Frontends)
}
return BackendSnapshot{Health: w.backend, Config: w.entry}, true
}
@@ -247,7 +253,7 @@ func (c *Checker) ResumeBackend(name string) (BackendSnapshot, bool) {
maxHistory := c.cfg.HealthChecker.TransitionHistory
if w.backend.Resume(maxHistory) {
slog.Info("backend-resume", "backend", name)
c.emitForBackend(name, w.backend.Address, w.backend.Transitions[0])
c.emitForBackend(name, w.backend.Address, w.backend.Transitions[0], c.cfg.Frontends)
}
return BackendSnapshot{Health: w.backend, Config: w.entry}, true
}
@@ -256,7 +262,7 @@ func (c *Checker) ResumeBackend(name string) (BackendSnapshot, bool) {
// startWorker creates a Backend and launches a probe goroutine.
// Must be called with c.mu held.
func (c *Checker) startWorker(ctx context.Context, name string, entry config.Backend, hc config.HealthCheck, pos, total int) {
func (c *Checker) startWorker(ctx context.Context, name string, entry config.Backend, hc config.HealthCheck, pos, total, maxHistory int) {
rise, fall := hc.Rise, hc.Fall
if entry.HealthCheck == "" {
// No healthcheck: one synthetic pass drives the backend to Up immediately.
@@ -269,6 +275,7 @@ func (c *Checker) startWorker(ctx context.Context, name string, entry config.Bac
entry: entry,
cancel: cancel,
}
w.backend.Start(maxHistory)
c.workers[name] = w
go c.runProbe(wCtx, name, pos, total)
}
@@ -369,16 +376,16 @@ func (c *Checker) runProbe(ctx context.Context, name string, pos, total int) {
"code", result.Code,
"detail", result.Detail,
)
c.emitForBackend(name, addr, t)
c.emitForBackend(name, addr, t, c.cfg.Frontends)
}
c.mu.Unlock()
}
}
// emitForBackend emits one Event per frontend that references backendName.
// Must be called with c.mu held.
func (c *Checker) emitForBackend(backendName string, addr net.IP, t health.Transition) {
for feName, fe := range c.cfg.Frontends {
// emitForBackend emits one Event per frontend that references backendName,
// using the provided frontends map. Must be called with c.mu held.
func (c *Checker) emitForBackend(backendName string, addr net.IP, t health.Transition, frontends map[string]config.Frontend) {
for feName, fe := range frontends {
for _, name := range fe.Backends {
if name == backendName {
c.emit(Event{FrontendName: feName, BackendName: backendName, Backend: addr, Transition: t})