vpp-maglev/internal/grpcapi/server.go
commit 3227263d68 (Pim van Pelt, 2026-04-11 22:03:28 +02:00): Add GoVPP integration and GetVPPInfo gRPC call
VPP client (internal/vpp/)
- New package managing connections to both VPP API and stats sockets,
  treated as a unit: if either drops, both are torn down and
  re-established together.
- Run() loop: connect, fetch version via vpe.ShowVersion, read
  /sys/boottime from the stats segment, log vpp-connect, then monitor
  with control_ping every 10s. On failure, disconnect both and retry
  after 5s (see the sketch after this list).
- Registers as client name "vpp-maglev" (visible in VPP's
  "show api clients").
- Flags: --vpp-api-addr (default /run/vpp/api.sock) and
  --vpp-stats-addr (default /run/vpp/stats.sock). Empty api addr
  disables VPP integration entirely.
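
A minimal sketch of that Run() loop shape. The Client struct and its
connectBoth/ping/disconnectBoth fields are hypothetical stand-ins for
the actual GoVPP API/stats calls; only the 10s ping and 5s retry
intervals come from the notes above:

    package vpp // illustrative skeleton, not the real package

    import (
        "context"
        "log/slog"
        "time"
    )

    // Client stands in for the real internal/vpp client; the three
    // function fields abstract the GoVPP API/stats operations.
    type Client struct {
        connectBoth    func() error // connect API and stats sockets together
        ping           func() error // control_ping over the API socket
        disconnectBoth func()       // tear both connections down as a unit
    }

    // Run connects, monitors with a periodic ping, and on any failure
    // tears down both connections and retries after a fixed delay.
    func (c *Client) Run(ctx context.Context) {
        for {
            if err := c.connectBoth(); err == nil {
                slog.Info("vpp-connect")
                c.monitor(ctx)
                c.disconnectBoth()
            }
            select {
            case <-ctx.Done():
                return
            case <-time.After(5 * time.Second):
            }
        }
    }

    // monitor pings every 10s and returns on the first failure or
    // when ctx is cancelled.
    func (c *Client) monitor(ctx context.Context) {
        tick := time.NewTicker(10 * time.Second)
        defer tick.Stop()
        for {
            select {
            case <-ctx.Done():
                return
            case <-tick.C:
                if err := c.ping(); err != nil {
                    return
                }
            }
        }
    }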

gRPC / proto
- Add GetVPPInfo RPC returning VPPInfo: version, build_date,
  build_directory, pid, boottime_ns, connecttime_ns. Both times are
  unix timestamps in nanoseconds; the client computes durations
  locally for display (see the sketch after this list).
- Returns codes.Unavailable if VPP is disabled or not connected.
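
Since both fields are absolute timestamps, deriving the durations on
the client side is a few lines. A sketch using the generated Go field
names (imports of fmt and time elided; the rendering is illustrative,
not maglevc's exact output):

    // printVPPTimes renders the two VPPInfo timestamps with their
    // age. A boottime_ns of 0 means VPP's boot time was unavailable.
    func printVPPTimes(info *VPPInfo) {
        if info.BoottimeNs != 0 {
            boot := time.Unix(0, info.BoottimeNs)
            fmt.Printf("vpp-boottime: %s (%s)\n",
                boot.Format(time.RFC3339), time.Since(boot).Round(time.Second))
        }
        conn := time.Unix(0, info.ConnecttimeNs)
        fmt.Printf("connected:    %s (%s)\n",
            conn.Format(time.RFC3339), time.Since(conn).Round(time.Second))
    }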

maglevc
- Add 'show vpp info' command displaying version, build-date,
  build-dir, vpp-pid, vpp-boottime (with duration), and connected
  time (with duration).

// Copyright (c) 2026, Pim van Pelt <pim@ipng.ch>
package grpcapi

import (
	"context"
	"log/slog"
	"net"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"git.ipng.ch/ipng/vpp-maglev/internal/checker"
	"git.ipng.ch/ipng/vpp-maglev/internal/config"
	"git.ipng.ch/ipng/vpp-maglev/internal/health"
	"git.ipng.ch/ipng/vpp-maglev/internal/vpp"
)

// Server implements the MaglevServer gRPC interface.
type Server struct {
	UnimplementedMaglevServer

	ctx        context.Context
	checker    *checker.Checker
	logs       *LogBroadcaster
	configPath string
	vppClient  *vpp.Client // nil when VPP integration is disabled
}

// NewServer creates a Server backed by the given Checker. logs may be nil, in
// which case log events are never sent to WatchEvents streams. configPath is
// used by CheckConfig to reload and validate the configuration file on demand.
// vppClient may be nil if VPP integration is disabled. The provided context
// controls the lifetime of streaming RPCs: cancelling it closes all active
// WatchEvents streams so that grpc.Server.GracefulStop can complete.
func NewServer(ctx context.Context, c *checker.Checker, logs *LogBroadcaster, configPath string, vppClient *vpp.Client) *Server {
	return &Server{ctx: ctx, checker: c, logs: logs, configPath: configPath, vppClient: vppClient}
}
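
// Example wiring (a sketch: RegisterMaglevServer is the protoc-generated
// registration function; listener, checker and flag handling are elided):
//
//	srv := grpc.NewServer()
//	RegisterMaglevServer(srv, NewServer(ctx, chk, logs, cfgPath, vppClient))
//	go srv.Serve(lis)
//	...
//	cancel()           // close active WatchEvents streams first,
//	srv.GracefulStop() // so that GracefulStop can complete.
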
// ListFrontends returns the names of all configured frontends.
func (s *Server) ListFrontends(_ context.Context, _ *ListFrontendsRequest) (*ListFrontendsResponse, error) {
	return &ListFrontendsResponse{FrontendNames: s.checker.ListFrontends()}, nil
}

// GetFrontend returns configuration details for a single frontend.
func (s *Server) GetFrontend(_ context.Context, req *GetFrontendRequest) (*FrontendInfo, error) {
	fe, ok := s.checker.GetFrontend(req.Name)
	if !ok {
		return nil, status.Errorf(codes.NotFound, "frontend %q not found", req.Name)
	}
	return frontendToProto(req.Name, fe), nil
}

// ListBackends returns the names of all active backends.
func (s *Server) ListBackends(_ context.Context, _ *ListBackendsRequest) (*ListBackendsResponse, error) {
	return &ListBackendsResponse{BackendNames: s.checker.ListBackends()}, nil
}

// GetBackend returns health state for a backend by name.
func (s *Server) GetBackend(_ context.Context, req *GetBackendRequest) (*BackendInfo, error) {
	b, ok := s.checker.GetBackend(req.Name)
	if !ok {
		return nil, status.Errorf(codes.NotFound, "backend %q not found", req.Name)
	}
	return backendToProto(b), nil
}

// PauseBackend pauses health checking for a backend by name.
func (s *Server) PauseBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
	b, err := s.checker.PauseBackend(req.Name)
	if err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
	}
	return backendToProto(b), nil
}

// ResumeBackend resumes health checking for a backend by name.
func (s *Server) ResumeBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
	b, err := s.checker.ResumeBackend(req.Name)
	if err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "%v", err)
	}
	return backendToProto(b), nil
}

// EnableBackend re-enables a previously disabled backend.
func (s *Server) EnableBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
	b, ok := s.checker.EnableBackend(req.Name)
	if !ok {
		return nil, status.Errorf(codes.NotFound, "backend %q not found", req.Name)
	}
	return backendToProto(b), nil
}

// DisableBackend disables a backend, stopping its probe goroutine.
func (s *Server) DisableBackend(_ context.Context, req *BackendRequest) (*BackendInfo, error) {
	b, ok := s.checker.DisableBackend(req.Name)
	if !ok {
		return nil, status.Errorf(codes.NotFound, "backend %q not found", req.Name)
	}
	return backendToProto(b), nil
}

// SetFrontendPoolBackendWeight updates the weight of a backend in a pool.
func (s *Server) SetFrontendPoolBackendWeight(_ context.Context, req *SetWeightRequest) (*FrontendInfo, error) {
	if req.Weight < 0 || req.Weight > 100 {
		return nil, status.Errorf(codes.InvalidArgument, "weight %d out of range [0, 100]", req.Weight)
	}
	fe, err := s.checker.SetFrontendPoolBackendWeight(req.Frontend, req.Pool, req.Backend, int(req.Weight))
	if err != nil {
		return nil, status.Errorf(codes.NotFound, "%v", err)
	}
	return frontendToProto(req.Frontend, fe), nil
}

// ListHealthChecks returns the names of all configured health checks.
func (s *Server) ListHealthChecks(_ context.Context, _ *ListHealthChecksRequest) (*ListHealthChecksResponse, error) {
	return &ListHealthChecksResponse{Names: s.checker.ListHealthChecks()}, nil
}

// GetHealthCheck returns the full configuration for a health check by name.
func (s *Server) GetHealthCheck(_ context.Context, req *GetHealthCheckRequest) (*HealthCheckInfo, error) {
	hc, ok := s.checker.GetHealthCheck(req.Name)
	if !ok {
		return nil, status.Errorf(codes.NotFound, "healthcheck %q not found", req.Name)
	}
	return healthCheckToProto(req.Name, hc), nil
}

// WatchEvents streams events to the client. On connect, the current state of
// all backends is sent as synthetic BackendEvents. Afterwards, live events are
// forwarded based on the filter flags in req. An unset (nil) flag defaults to
// true (subscribe). An empty log_level defaults to "info".
func (s *Server) WatchEvents(req *WatchRequest, stream Maglev_WatchEventsServer) error {
	wantLog := req.Log == nil || *req.Log
	wantBackend := req.Backend == nil || *req.Backend
	wantFrontend := req.Frontend == nil || *req.Frontend
	_ = wantFrontend // no frontend events emitted yet

	logLevel := slog.LevelInfo
	if req.LogLevel != "" {
		if err := logLevel.UnmarshalText([]byte(req.LogLevel)); err != nil {
			return status.Errorf(codes.InvalidArgument, "invalid log_level %q: must be debug, info, warn, or error", req.LogLevel)
		}
	}

	// Subscribe to log events (nil channel blocks forever when not wanted).
	var logCh <-chan *LogEvent
	if wantLog && s.logs != nil {
		var unsub func()
		logCh, unsub = s.logs.Subscribe(logLevel)
		defer unsub()
	}

	// Subscribe to backend events; send initial state snapshot first.
	var backendCh <-chan checker.Event
	if wantBackend {
		for _, name := range s.checker.ListBackends() {
			snap, ok := s.checker.GetBackend(name)
			if !ok {
				continue
			}
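			// Synthetic snapshot event: From == To and AtUnixNs == 0 mark
			// this as current state rather than an observed transition.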
			ev := &Event{Event: &Event_Backend{Backend: &BackendEvent{
				BackendName: name,
				Transition: &TransitionRecord{
					From:     snap.Health.State.String(),
					To:       snap.Health.State.String(),
					AtUnixNs: 0,
				},
			}}}
			if err := stream.Send(ev); err != nil {
				return err
			}
		}
		var unsub func()
		backendCh, unsub = s.checker.Subscribe()
		defer unsub()
	}

	for {
		select {
		case <-s.ctx.Done():
			return status.Error(codes.Unavailable, "server shutting down")
		case <-stream.Context().Done():
			return nil
		case le, ok := <-logCh:
			if !ok {
				return nil
			}
			if err := stream.Send(&Event{Event: &Event_Log{Log: le}}); err != nil {
				return err
			}
		case e, ok := <-backendCh:
			if !ok {
				return nil
			}
			if err := stream.Send(&Event{Event: &Event_Backend{Backend: &BackendEvent{
				BackendName: e.BackendName,
				Transition:  transitionToProto(e.Transition),
			}}}); err != nil {
				return err
			}
		}
	}
}
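
// Consuming the stream from a client looks like this (a sketch; MaglevClient
// and the oneof getters follow the protoc-generated naming):
//
//	stream, err := client.WatchEvents(ctx, &WatchRequest{LogLevel: "debug"})
//	for err == nil {
//		var ev *Event
//		if ev, err = stream.Recv(); err == nil {
//			_ = ev.GetLog()     // nil unless a log event
//			_ = ev.GetBackend() // nil unless a backend event
//		}
//	}
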
// CheckConfig reads and validates the configuration file, returning a
// structured result that distinguishes YAML parse errors from semantic errors.
func (s *Server) CheckConfig(_ context.Context, _ *CheckConfigRequest) (*CheckConfigResponse, error) {
	slog.Info("config-check-start", "path", s.configPath)
	_, result := config.Check(s.configPath)
	resp := &CheckConfigResponse{
		Ok:            result.OK(),
		ParseError:    result.ParseError,
		SemanticError: result.SemanticError,
	}
	if result.OK() {
		slog.Info("config-check-done", "result", "ok")
	} else if result.ParseError != "" {
		slog.Info("config-check-done", "result", "failed", "type", "parse", "err", result.ParseError)
	} else {
		slog.Info("config-check-done", "result", "failed", "type", "semantic", "err", result.SemanticError)
	}
	return resp, nil
}

// ReloadConfig checks the configuration file and, if valid, applies it to the
// running checker. This is the same code path used by SIGHUP.
func (s *Server) ReloadConfig(_ context.Context, _ *ReloadConfigRequest) (*ReloadConfigResponse, error) {
	return s.doReloadConfig(), nil
}

// TriggerReload performs a config check and reload. Intended for use by the
// SIGHUP handler so that signals and gRPC share the same code path.
func (s *Server) TriggerReload() {
	s.doReloadConfig()
}

func (s *Server) doReloadConfig() *ReloadConfigResponse {
	slog.Info("config-reload-start")
	newCfg, result := config.Check(s.configPath)
	if !result.OK() {
		if result.ParseError != "" {
			slog.Error("config-check-failed", "type", "parse", "err", result.ParseError)
		} else {
			slog.Error("config-check-failed", "type", "semantic", "err", result.SemanticError)
		}
		return &ReloadConfigResponse{
			ParseError:    result.ParseError,
			SemanticError: result.SemanticError,
		}
	}
	if err := s.checker.Reload(s.ctx, newCfg); err != nil {
		slog.Error("checker-reload-error", "err", err)
		return &ReloadConfigResponse{
			ReloadError: err.Error(),
		}
	}
	slog.Info("config-reload-done", "frontends", len(newCfg.Frontends))
	return &ReloadConfigResponse{Ok: true}
}

// GetVPPInfo returns VPP version and runtime information.
func (s *Server) GetVPPInfo(_ context.Context, _ *GetVPPInfoRequest) (*VPPInfo, error) {
	if s.vppClient == nil {
		return nil, status.Error(codes.Unavailable, "VPP integration is disabled")
	}
	info, err := s.vppClient.GetInfo()
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "%v", err)
	}
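	// A zero BootTime means VPP's boot time (from /sys/boottime) was
	// unavailable; report 0 rather than the zero time's negative UnixNano.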
	var boottimeNs int64
	if !info.BootTime.IsZero() {
		boottimeNs = info.BootTime.UnixNano()
	}
	return &VPPInfo{
		Version:        info.Version,
		BuildDate:      info.BuildDate,
		BuildDirectory: info.BuildDirectory,
		Pid:            info.PID,
		BoottimeNs:     boottimeNs,
		ConnecttimeNs:  info.ConnectedSince.UnixNano(),
	}, nil
}

// ---- conversion helpers ----------------------------------------------------

func frontendToProto(name string, fe config.Frontend) *FrontendInfo {
	pools := make([]*PoolInfo, 0, len(fe.Pools))
	for _, p := range fe.Pools {
		pi := &PoolInfo{Name: p.Name}
		for bName, pb := range p.Backends {
			pi.Backends = append(pi.Backends, &PoolBackendInfo{
				Name:   bName,
				Weight: int32(pb.Weight),
			})
		}
		pools = append(pools, pi)
	}
	return &FrontendInfo{
		Name:        name,
		Address:     fe.Address.String(),
		Protocol:    fe.Protocol,
		Port:        uint32(fe.Port),
		Description: fe.Description,
		Pools:       pools,
	}
}

func backendToProto(snap checker.BackendSnapshot) *BackendInfo {
	info := &BackendInfo{
		Name:        snap.Health.Name,
		Address:     snap.Health.Address.String(),
		State:       snap.Health.State.String(),
		Enabled:     snap.Config.Enabled,
		Healthcheck: snap.Config.HealthCheck,
	}
	for _, t := range snap.Health.Transitions {
		info.Transitions = append(info.Transitions, transitionToProto(t))
	}
	return info
}

func healthCheckToProto(name string, hc config.HealthCheck) *HealthCheckInfo {
	info := &HealthCheckInfo{
		Name:           name,
		Type:           hc.Type,
		Port:           uint32(hc.Port),
		IntervalNs:     hc.Interval.Nanoseconds(),
		FastIntervalNs: hc.FastInterval.Nanoseconds(),
		DownIntervalNs: hc.DownInterval.Nanoseconds(),
		TimeoutNs:      hc.Timeout.Nanoseconds(),
		Rise:           int32(hc.Rise),
		Fall:           int32(hc.Fall),
	}
	if hc.ProbeIPv4Src != nil {
		info.ProbeIpv4Src = hc.ProbeIPv4Src.String()
	}
	if hc.ProbeIPv6Src != nil {
		info.ProbeIpv6Src = hc.ProbeIPv6Src.String()
	}
	if hc.HTTP != nil {
		re := ""
		if hc.HTTP.ResponseRegexp != nil {
			re = hc.HTTP.ResponseRegexp.String()
		}
		info.Http = &HTTPCheckParams{
			Path:               hc.HTTP.Path,
			Host:               hc.HTTP.Host,
			ResponseCodeMin:    int32(hc.HTTP.ResponseCodeMin),
			ResponseCodeMax:    int32(hc.HTTP.ResponseCodeMax),
			ResponseRegexp:     re,
			ServerName:         hc.HTTP.ServerName,
			InsecureSkipVerify: hc.HTTP.InsecureSkipVerify,
		}
	}
	if hc.TCP != nil {
		info.Tcp = &TCPCheckParams{
			Ssl:                hc.TCP.SSL,
			ServerName:         hc.TCP.ServerName,
			InsecureSkipVerify: hc.TCP.InsecureSkipVerify,
		}
	}
	return info
}

func transitionToProto(t health.Transition) *TransitionRecord {
	return &TransitionRecord{
		From:     t.From.String(),
		To:       t.To.String(),
		AtUnixNs: t.At.UnixNano(),
	}
}

// Blank reference that keeps the net import alive: Address values are net.IP,
// but nothing else in this file names the package directly.
var _ = net.IP{}