Files
2026-03-25 00:09:17 +01:00

159 lines
3.9 KiB
Go

package main
import (
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"os"
"strconv"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// exporter holds the Prometheus metrics published by this program.
// fetch() rewrites uptime under mu; the run() loop updates fetchOk/fetchAt.
type exporter struct {
	mu      sync.RWMutex        // guards uptime across Reset/Set in fetch()
	uptime  *prometheus.GaugeVec // ct_log_uptime_ratio, labeled by log_url/endpoint
	fetchOk prometheus.Gauge    // ct_log_uptime_fetch_success: 1 on last success, else 0
	fetchAt prometheus.Gauge    // ct_log_uptime_fetch_timestamp_seconds: last attempt time
}
// newExporter builds the exporter's gauges, registers them with reg, and
// returns the ready-to-use exporter. Registration failures panic via
// MustRegister, which is appropriate at startup.
func newExporter(reg prometheus.Registerer) *exporter {
	uptime := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "ct_log_uptime_ratio",
		Help: "24h uptime ratio (0-1) for a CT log endpoint, sourced from gstatic.com.",
	}, []string{"log_url", "endpoint"})

	fetchOk := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "ct_log_uptime_fetch_success",
		Help: "1 if the last CSV fetch succeeded, 0 otherwise.",
	})

	fetchAt := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "ct_log_uptime_fetch_timestamp_seconds",
		Help: "Unix timestamp of the last CSV fetch attempt.",
	})

	e := &exporter{uptime: uptime, fetchOk: fetchOk, fetchAt: fetchAt}
	reg.MustRegister(e.uptime, e.fetchOk, e.fetchAt)
	return e
}
// fetch downloads the uptime CSV from csvURL, parses it, and atomically
// replaces the values of e.uptime. On any error the previous metric values
// are left untouched (rows are collected fully before the lock is taken).
//
// Expected CSV layout (per the skipped header): log_url, endpoint, uptime
// percentage in [0,100]. The percentage is stored as a 0-1 ratio.
func (e *exporter) fetch(csvURL string) error {
	// Use a client with an explicit timeout: http.Get uses the default
	// client, which has none, so a stalled server could block the fetch
	// loop forever.
	client := &http.Client{Timeout: 2 * time.Minute}
	resp, err := client.Get(csvURL)
	if err != nil {
		return fmt.Errorf("GET %s: %w", csvURL, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}

	r := csv.NewReader(resp.Body)
	// skip header
	if _, err := r.Read(); err != nil {
		return fmt.Errorf("read header: %w", err)
	}

	// Collect new values before updating metrics so a mid-stream failure
	// never leaves the gauge vector half-reset.
	type row struct {
		logURL, endpoint string
		ratio            float64
	}
	var rows []row
	for {
		rec, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("read row: %w", err)
		}
		if len(rec) < 3 {
			continue
		}
		pct, err := strconv.ParseFloat(rec[2], 64)
		if err != nil {
			// Best-effort: a single bad row should not abort the fetch.
			log.Printf("skipping row with unparseable uptime %q: %v", rec[2], err)
			continue
		}
		rows = append(rows, row{rec[0], rec[1], pct / 100.0})
	}

	e.mu.Lock()
	defer e.mu.Unlock()
	// Reset drops stale label combinations (logs removed from the CSV).
	e.uptime.Reset()
	for _, row := range rows {
		e.uptime.WithLabelValues(row.logURL, row.endpoint).Set(row.ratio)
	}
	return nil
}
// run fetches the CSV immediately and then forever at roughly every
// interval, with a uniform random offset in [-jitter, +jitter) to avoid
// synchronized polling. It never returns; callers start it in a goroutine.
func (e *exporter) run(csvURL string, interval, jitter time.Duration) {
	do := func() {
		e.fetchAt.SetToCurrentTime()
		if err := e.fetch(csvURL); err != nil {
			log.Printf("fetch error: %v", err)
			e.fetchOk.Set(0)
		} else {
			log.Printf("fetch ok")
			e.fetchOk.Set(1)
		}
	}
	do()
	for {
		sleep := interval
		if jitter > 0 {
			// rand.Int63n panics on a non-positive argument, so only
			// apply jitter when it is actually requested.
			sleep += time.Duration(rand.Int63n(int64(2*jitter))) - jitter
		}
		if sleep < 0 {
			// jitter larger than interval can go negative; clamp so the
			// log line stays sensible (Sleep treats <=0 as "return now").
			sleep = 0
		}
		log.Printf("next fetch in %s", sleep.Round(time.Second))
		time.Sleep(sleep)
		do()
	}
}
func envOr(key, def string) string {
if v := os.Getenv(key); v != "" {
return v
}
return def
}
// envDurationOr parses the duration held in environment variable key,
// falling back to def when the variable is unset or empty. The process
// exits on a malformed value, matching the original startup behavior.
func envDurationOr(key, def string) time.Duration {
	d, err := time.ParseDuration(envOr(key, def))
	if err != nil {
		log.Fatalf("invalid %s: %v", key, err)
	}
	return d
}

func main() {
	addr := flag.String("listen", envOr("LISTEN", ":9781"), "address to listen on")
	csvURL := flag.String("url", envOr("URL", "https://www.gstatic.com/ct/compliance/endpoint_uptime_24h.csv"), "URL of the uptime CSV")
	interval := flag.Duration("interval", envDurationOr("INTERVAL", "25m"), "how often to fetch the CSV")
	jitter := flag.Duration("jitter", envDurationOr("JITTER", "5m"), "maximum +/-jitter applied to the fetch interval")
	flag.Parse()

	// Custom registry: only process/Go runtime collectors plus our own
	// metrics, instead of the default registry's implicit contents.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	reg.MustRegister(prometheus.NewGoCollector())

	exp := newExporter(reg)
	go exp.run(*csvURL, *interval, *jitter)

	// Use a dedicated mux rather than the global http.DefaultServeMux so
	// nothing else in the process can register handlers on our listener.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, `<html><body><a href="/metrics">metrics</a></body></html>`)
	})

	// Explicit server with timeouts so slow or stuck clients cannot pin
	// connections indefinitely (http.ListenAndServe sets none).
	srv := &http.Server{
		Addr:         *addr,
		Handler:      mux,
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 30 * time.Second,
	}
	log.Printf("listening on %s", *addr)
	log.Fatal(srv.ListenAndServe())
}