initial checkin

This commit is contained in:
2026-03-24 20:30:16 +01:00
commit edc8f4296a
6 changed files with 414 additions and 0 deletions

136
main.go Normal file
View File

@@ -0,0 +1,136 @@
package main
import (
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"strconv"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// exporter holds the Prometheus metrics published by this process and the
// lock that serializes updates to them.
type exporter struct {
	// mu guards the Reset+Set sequence in fetch so the uptime vector is
	// never observed half-rebuilt by concurrent callers of fetch.
	// NOTE(review): only Lock is ever taken (no RLock callers visible in
	// this file), so a plain sync.Mutex would suffice — confirm before
	// changing.
	mu sync.RWMutex
	// uptime is the per-(log_url, endpoint) 24h uptime ratio parsed from
	// the Google CSV; fully rebuilt on every successful fetch.
	uptime *prometheus.GaugeVec
	// fetchOk is 1 if the most recent CSV fetch succeeded, 0 otherwise.
	fetchOk prometheus.Gauge
	// fetchAt is the Unix timestamp of the most recent fetch attempt
	// (set whether or not the attempt succeeded).
	fetchAt prometheus.Gauge
}
func newExporter(reg prometheus.Registerer) *exporter {
e := &exporter{
uptime: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "ct_log_uptime_ratio",
Help: "24h uptime ratio (01) for a CT log endpoint, sourced from gstatic.com.",
}, []string{"log_url", "endpoint"}),
fetchOk: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "ct_log_uptime_fetch_success",
Help: "1 if the last CSV fetch succeeded, 0 otherwise.",
}),
fetchAt: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "ct_log_uptime_fetch_timestamp_seconds",
Help: "Unix timestamp of the last CSV fetch attempt.",
}),
}
reg.MustRegister(e.uptime, e.fetchOk, e.fetchAt)
return e
}
// fetch downloads the uptime CSV from csvURL, parses it, and atomically
// replaces the uptime metric vector with the new values. On any error the
// previously published values are left untouched.
//
// Expected CSV shape: a header row, then rows of at least three fields:
// log URL, endpoint name, uptime percentage (0-100).
func (e *exporter) fetch(csvURL string) error {
	// Use a client with an explicit timeout: http.Get uses the default
	// client, which has none, so a stalled server would hang this
	// goroutine forever. fetch runs at most every few hours, so building
	// a client per call is fine.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get(csvURL)
	if err != nil {
		return fmt.Errorf("GET %s: %w", csvURL, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}

	r := csv.NewReader(resp.Body)
	// Allow rows whose field count differs from the header's: the default
	// (FieldsPerRecord = 0) would make Read return ErrFieldCount for such
	// rows and abort the whole fetch, defeating the len(rec) < 3 skip
	// below.
	r.FieldsPerRecord = -1
	// Skip the header row.
	if _, err := r.Read(); err != nil {
		return fmt.Errorf("read header: %w", err)
	}

	// Collect all new values before touching the metrics so a mid-file
	// parse error leaves the published vector intact.
	type row struct {
		logURL, endpoint string
		ratio            float64
	}
	var rows []row
	for {
		rec, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("read row: %w", err)
		}
		if len(rec) < 3 {
			continue
		}
		pct, err := strconv.ParseFloat(rec[2], 64)
		if err != nil {
			log.Printf("skipping row with unparseable uptime %q: %v", rec[2], err)
			continue
		}
		// CSV carries a percentage; the metric publishes a 0-1 ratio.
		rows = append(rows, row{rec[0], rec[1], pct / 100.0})
	}

	e.mu.Lock()
	defer e.mu.Unlock()
	// Reset first so label sets that disappeared from the CSV are dropped.
	e.uptime.Reset()
	for _, row := range rows {
		e.uptime.WithLabelValues(row.logURL, row.endpoint).Set(row.ratio)
	}
	return nil
}
// run fetches the CSV immediately, then loops forever fetching roughly every
// interval, with a uniform random offset in [-jitter, +jitter) to avoid
// synchronized load on the upstream. It never returns; callers start it in
// its own goroutine.
func (e *exporter) run(csvURL string, interval, jitter time.Duration) {
	do := func() {
		// Record the attempt time whether or not the fetch succeeds.
		e.fetchAt.SetToCurrentTime()
		if err := e.fetch(csvURL); err != nil {
			log.Printf("fetch error: %v", err)
			e.fetchOk.Set(0)
		} else {
			log.Printf("fetch ok")
			e.fetchOk.Set(1)
		}
	}
	do()
	for {
		// Guard jitter <= 0: rand.Int63n panics when its argument is
		// not positive, so an unguarded call would crash on -jitter=0.
		var delta time.Duration
		if jitter > 0 {
			delta = time.Duration(rand.Int63n(int64(2*jitter))) - jitter
		}
		sleep := interval + delta
		// Clamp in case jitter exceeds interval; a negative Sleep would
		// return immediately and busy-loop the fetcher.
		if sleep < 0 {
			sleep = 0
		}
		log.Printf("next fetch in %s", sleep.Round(time.Second))
		time.Sleep(sleep)
		do()
	}
}
// main parses flags, wires up the Prometheus registry and background
// fetcher, and serves /metrics until the process is killed.
func main() {
	addr := flag.String("listen", ":9781", "address to listen on")
	csvURL := flag.String("url", "https://www.gstatic.com/ct/compliance/endpoint_uptime_24h.csv", "URL of the uptime CSV")
	interval := flag.Duration("interval", 12*time.Hour, "how often to fetch the CSV")
	jitter := flag.Duration("jitter", 5*time.Minute, "maximum ±jitter applied to the fetch interval")
	flag.Parse()

	// Use a dedicated registry (rather than the global default) so only
	// metrics registered here are exposed.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	reg.MustRegister(prometheus.NewGoCollector())
	exp := newExporter(reg)
	go exp.run(*csvURL, *interval, *jitter)

	// Explicit mux instead of the global http.DefaultServeMux, so other
	// packages can't silently register handlers on this server.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// No format verbs, so Fprint (not Fprintf) is the right call.
		fmt.Fprint(w, `<html><body><a href="/metrics">metrics</a></body></html>`)
	})

	// An explicit server lets us set ReadHeaderTimeout; the bare
	// http.ListenAndServe has no timeouts at all and is vulnerable to
	// slow-header (Slowloris) clients.
	srv := &http.Server{
		Addr:              *addr,
		Handler:           mux,
		ReadHeaderTimeout: 10 * time.Second,
	}
	log.Printf("listening on %s", *addr)
	log.Fatal(srv.ListenAndServe())
}