Implement target selection and autodiscovery via the aggregator; implement ListTargets
This commit is contained in:
@@ -287,8 +287,8 @@ func TestGRPCEndToEnd(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
go NewCollectorSub(addr1, merger).Run(ctx)
|
||||
go NewCollectorSub(addr2, merger).Run(ctx)
|
||||
go NewCollectorSub(addr1, merger, NewTargetRegistry(nil)).Run(ctx)
|
||||
go NewCollectorSub(addr2, merger, NewTargetRegistry(nil)).Run(ctx)
|
||||
|
||||
// Wait for both snapshots to be applied.
|
||||
deadline := time.Now().Add(3 * time.Second)
|
||||
@@ -309,7 +309,7 @@ func TestGRPCEndToEnd(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
grpcSrv := grpc.NewServer()
|
||||
pb.RegisterLogtailServiceServer(grpcSrv, NewServer(cache, "agg-test"))
|
||||
pb.RegisterLogtailServiceServer(grpcSrv, NewServer(cache, "agg-test", NewTargetRegistry(nil)))
|
||||
go grpcSrv.Serve(lis)
|
||||
defer grpcSrv.Stop()
|
||||
|
||||
@@ -399,8 +399,8 @@ func TestDegradedCollector(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
go NewCollectorSub(addr1, merger).Run(ctx)
|
||||
go NewCollectorSub(addr2, merger).Run(ctx)
|
||||
go NewCollectorSub(addr1, merger, NewTargetRegistry(nil)).Run(ctx)
|
||||
go NewCollectorSub(addr2, merger, NewTargetRegistry(nil)).Run(ctx)
|
||||
|
||||
// Wait for col1's data to appear.
|
||||
deadline := time.Now().Add(3 * time.Second)
|
||||
|
||||
@@ -27,16 +27,21 @@ func main() {
|
||||
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
merger := NewMerger()
|
||||
cache := NewCache(merger, *source)
|
||||
go cache.Run(ctx)
|
||||
|
||||
var collectorAddrs []string
|
||||
for _, addr := range strings.Split(*collectors, ",") {
|
||||
addr = strings.TrimSpace(addr)
|
||||
if addr == "" {
|
||||
continue
|
||||
if addr != "" {
|
||||
collectorAddrs = append(collectorAddrs, addr)
|
||||
}
|
||||
sub := NewCollectorSub(addr, merger)
|
||||
}
|
||||
|
||||
merger := NewMerger()
|
||||
cache := NewCache(merger, *source)
|
||||
registry := NewTargetRegistry(collectorAddrs)
|
||||
go cache.Run(ctx)
|
||||
|
||||
for _, addr := range collectorAddrs {
|
||||
sub := NewCollectorSub(addr, merger, registry)
|
||||
go sub.Run(ctx)
|
||||
log.Printf("aggregator: subscribing to collector %s", addr)
|
||||
}
|
||||
@@ -46,7 +51,7 @@ func main() {
|
||||
log.Fatalf("aggregator: failed to listen on %s: %v", *listen, err)
|
||||
}
|
||||
grpcServer := grpc.NewServer()
|
||||
pb.RegisterLogtailServiceServer(grpcServer, NewServer(cache, *source))
|
||||
pb.RegisterLogtailServiceServer(grpcServer, NewServer(cache, *source, registry))
|
||||
|
||||
go func() {
|
||||
log.Printf("aggregator: gRPC listening on %s (source=%s)", *listen, *source)
|
||||
|
||||
47
cmd/aggregator/registry.go
Normal file
47
cmd/aggregator/registry.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// TargetInfo holds the display name and gRPC address of one collector target.
type TargetInfo struct {
	Name string // collector --source value; falls back to Addr until the first snapshot arrives
	Addr string // configured gRPC address
}

// TargetRegistry tracks addr → display name for all configured collectors.
// Names default to the addr and are updated to the collector's --source value
// when the first snapshot arrives. Safe for concurrent use.
type TargetRegistry struct {
	mu    sync.RWMutex
	names map[string]string // addr → current display name
}

// NewTargetRegistry builds a registry seeded with the given collector
// addresses. Each address initially maps to itself as its display name;
// SetName upgrades it once a snapshot reports the collector's source.
// A nil or empty addrs slice yields a usable, empty registry.
func NewTargetRegistry(addrs []string) *TargetRegistry {
	names := make(map[string]string, len(addrs))
	for _, a := range addrs {
		names[a] = a // default until first snapshot
	}
	return &TargetRegistry{names: names}
}

// SetName updates the display name for addr (called when a snapshot arrives).
// An empty name is ignored so that a snapshot with a blank Source cannot
// clobber the documented addr fallback. Addresses not seeded at construction
// are added, which supports autodiscovered collectors.
func (r *TargetRegistry) SetName(addr, name string) {
	if name == "" {
		return
	}
	r.mu.Lock()
	r.names[addr] = name
	r.mu.Unlock()
}

// Targets returns all registered targets sorted by addr for deterministic
// output. The returned slice is a snapshot; mutating it does not affect
// the registry.
func (r *TargetRegistry) Targets() []TargetInfo {
	r.mu.RLock()
	defer r.mu.RUnlock()
	out := make([]TargetInfo, 0, len(r.names))
	for addr, name := range r.names {
		out = append(out, TargetInfo{Name: name, Addr: addr})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Addr < out[j].Addr })
	return out
}
|
||||
@@ -14,12 +14,13 @@ import (
|
||||
// Server implements pb.LogtailServiceServer backed by the aggregator Cache.
|
||||
type Server struct {
|
||||
pb.UnimplementedLogtailServiceServer
|
||||
cache *Cache
|
||||
source string
|
||||
cache *Cache
|
||||
source string
|
||||
registry *TargetRegistry
|
||||
}
|
||||
|
||||
func NewServer(cache *Cache, source string) *Server {
|
||||
return &Server{cache: cache, source: source}
|
||||
func NewServer(cache *Cache, source string, registry *TargetRegistry) *Server {
|
||||
return &Server{cache: cache, source: source, registry: registry}
|
||||
}
|
||||
|
||||
func (srv *Server) TopN(_ context.Context, req *pb.TopNRequest) (*pb.TopNResponse, error) {
|
||||
@@ -53,6 +54,16 @@ func (srv *Server) Trend(_ context.Context, req *pb.TrendRequest) (*pb.TrendResp
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (srv *Server) ListTargets(_ context.Context, _ *pb.ListTargetsRequest) (*pb.ListTargetsResponse, error) {
|
||||
resp := &pb.ListTargetsResponse{}
|
||||
if srv.registry != nil {
|
||||
for _, t := range srv.registry.Targets() {
|
||||
resp.Targets = append(resp.Targets, &pb.TargetInfo{Name: t.Name, Addr: t.Addr})
|
||||
}
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (srv *Server) StreamSnapshots(_ *pb.SnapshotRequest, stream grpc.ServerStreamingServer[pb.Snapshot]) error {
|
||||
ch := srv.cache.Subscribe()
|
||||
defer srv.cache.Unsubscribe(ch)
|
||||
|
||||
@@ -15,12 +15,13 @@ import (
|
||||
// the collector degraded (zeroing its contribution) after 3 consecutive
|
||||
// failures.
|
||||
type CollectorSub struct {
|
||||
addr string
|
||||
merger *Merger
|
||||
addr string
|
||||
merger *Merger
|
||||
registry *TargetRegistry
|
||||
}
|
||||
|
||||
func NewCollectorSub(addr string, merger *Merger) *CollectorSub {
|
||||
return &CollectorSub{addr: addr, merger: merger}
|
||||
func NewCollectorSub(addr string, merger *Merger, registry *TargetRegistry) *CollectorSub {
|
||||
return &CollectorSub{addr: addr, merger: merger, registry: registry}
|
||||
}
|
||||
|
||||
// Run blocks until ctx is cancelled.
|
||||
@@ -92,6 +93,7 @@ func (cs *CollectorSub) stream(ctx context.Context) (bool, error) {
|
||||
return gotOne, err
|
||||
}
|
||||
gotOne = true
|
||||
cs.registry.SetName(cs.addr, snap.Source)
|
||||
cs.merger.Apply(snap)
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user