Compare commits

..

20 Commits

Author SHA1 Message Date
Pim van Pelt
0ecca06069 Typo fix 2026-03-27 01:21:46 +01:00
Pim van Pelt
862d043376 Refactor docs 2026-03-27 01:20:19 +01:00
Pim van Pelt
50fc94b87d Add screenshot 2026-03-27 01:11:27 +01:00
Pim van Pelt
6c3a28c9ce Pin fine/coarse mergeDump to the 1min/5min boundary, fixes sparkline 2026-03-25 18:14:20 +01:00
Pim van Pelt
d0fb34160f Clean up logging, update website filter hint 2026-03-25 07:43:27 +01:00
Pim van Pelt
456452afc4 Allow !~= for website/uri 2026-03-25 07:32:39 +01:00
Pim van Pelt
eddb04ced4 Add aggregator backfill, pulling fine+coarse buckets from collectors 2026-03-25 07:06:03 +01:00
Pim van Pelt
d2dcd88c4b Add Docker setup, add environment vars for each flag 2026-03-25 06:41:13 +01:00
Pim van Pelt
129246d85b Always render all sources from default target 2026-03-25 05:35:40 +01:00
Pim van Pelt
b3103834d0 Add Grafana dashboard 2026-03-24 04:50:24 +01:00
Pim van Pelt
91eb56a64c Add prometheus exporter on :9100 2026-03-24 03:49:22 +01:00
Pim van Pelt
c7f8455188 go fmt 2026-03-24 02:30:18 +01:00
Pim van Pelt
30c8c40157 Add ASN to logtail, collector, aggregator, frontend and CLI 2026-03-24 02:28:29 +01:00
Pim van Pelt
a798bb1d1d Add Claude callout, haters gotta hate 2026-03-23 22:22:43 +01:00
Pim van Pelt
625cada480 Add note and LICENSE 2026-03-23 22:21:30 +01:00
Pim van Pelt
0fb84813a5 Enable lame-duck on exit, so systemctl can 'restart' collectors 2026-03-23 22:18:05 +01:00
Pim van Pelt
cd7f15afaf Add is_tor plumbing from collector->aggregator->frontend/cli 2026-03-23 22:17:39 +01:00
Pim van Pelt
b89caa594c Auto-rediscover new glob patterns 2026-03-23 20:39:12 +01:00
Pim van Pelt
d3160c7dd4 Print peer address 2026-03-16 02:45:47 +01:00
Pim van Pelt
1c7637fbc3 Output single list of json objects 2026-03-16 02:42:04 +01:00
45 changed files with 4876 additions and 746 deletions

14
Dockerfile Normal file
View File

@@ -0,0 +1,14 @@
FROM golang:1.24-alpine AS builder
WORKDIR /src
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o /out/collector ./cmd/collector && \
CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o /out/aggregator ./cmd/aggregator && \
CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o /out/frontend ./cmd/frontend && \
CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o /out/cli ./cmd/cli
FROM scratch
COPY --from=builder /out/ /usr/local/bin/

202
LICENSE Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

411
README.md
View File

@@ -1,396 +1,31 @@
− SPECIFICATION
− This project contains four programs:
+ ## PREAMBLE
+ Although this computer program has a permissive license (AP2.0), if you came here looking to ask
+ questions, you're better off just moving on :) This program is shared AS-IS and really without any
+ intent for anybody but IPng Networks to use it. Also, in case the structure of the repo and the
+ style of this README wasn't already clear, this program is 100% written and maintained by Claude
+ Code.
− 1) A **collector** that tails any number of nginx log files and maintains an in-memory structure of
−    `{website, client_prefix, http_request_uri, http_response}` counts across all files. It answers
−    TopN and Trend queries via gRPC and pushes minute snapshots to the aggregator via server-streaming.
−    Runs on each nginx machine in the cluster. No UI — gRPC interface only.
+ You have been warned :)
− 2) An **aggregator** that subscribes to the snapshot stream from all collectors, merges their data
−    into a unified in-memory cache, and exposes the same gRPC interface. Answers questions like "what
−    is the busiest website globally", "which client prefix is causing the most HTTP 503s", and shows
−    trending information useful for DDoS detection. Runs on a central machine.
+ ![nginx-logtail frontend](docs/frontend.png)
− 3) An **HTTP frontend** companion to the aggregator that renders a drilldown dashboard. Operators
−    can restrict by `http_response=429`, then by `website=www.example.com`, and so on. Works with
−    either a collector or aggregator as its backend. Zero JavaScript — server-rendered HTML with inline
−    SVG sparklines and meta-refresh.
+ ## What is this?
− 4) A **CLI** for shell-based debugging. Sends `topn`, `trend`, and `stream` queries to any
−    collector or aggregator, fans out to multiple targets in parallel, and outputs human-readable
−    tables or newline-delimited JSON.
+ This project consists of four components:
+ 1. A log collector that tails NGINX (or Apache) logs of a certain format, and aggregates
+    information per website, client address, status, and so on. It buckets these into windows
+    of 1min, 5min, 15min, 60min, 6hrs and 24hrs. It exposes this on a gRPC endpoint.
+ 1. An aggregator that can scrape any number of collectors into a merged regional (or global)
+    view. The aggregator exposes the same gRPC endpoint as the collectors.
+ 1. A Frontend that allows to query this data structure very quickly.
+ 1. A CLI that allows to query this data also, returning JSON for further processing.
− Programs are written in Go. No CGO, no external runtime dependencies.
− ---
− DESIGN
+ It's written in Go, and is meant to deploy collectors on any number of webservers, and central
+ aggregation and frontend logic. It's released under [[APACHE](LICENSE)] license. It can be run
+ either as `systemd` units, or in Docker, or any combination of the two.
+ See [[User Guide](docs/USERGUIDE.md)] or [[DETAILS](docs/DETAILS.md)] for more information.
+ The [[docs/](docs/)] directory contains extensive planning information which shows how Claude
+ Code single-shot implemented the whole system in March 2026.
## Directory Layout
```
nginx-logtail/
├── proto/
│ ├── logtail.proto # shared protobuf definitions
│ └── logtailpb/
│ ├── logtail.pb.go # generated: messages, enums
│ └── logtail_grpc.pb.go # generated: service stubs
├── internal/
│ └── store/
│ └── store.go # shared types: Tuple4, Entry, Snapshot, ring helpers
└── cmd/
├── collector/
│ ├── main.go
│ ├── tailer.go # MultiTailer: tail N files via one shared fsnotify watcher
│ ├── parser.go # tab-separated logtail log_format parser (~50 ns/line)
│ ├── store.go # bounded top-K in-memory store + tiered ring buffers
│ └── server.go # gRPC server: TopN, Trend, StreamSnapshots
├── aggregator/
│ ├── main.go
│ ├── subscriber.go # one goroutine per collector; StreamSnapshots with backoff
│ ├── merger.go # delta-merge: O(snapshot_size) per update
│ ├── cache.go # tick-based ring buffer cache served to clients
│ ├── registry.go # TargetRegistry: addr→name map updated from snapshot sources
│ └── server.go # gRPC server (same surface as collector)
├── frontend/
│ ├── main.go
│ ├── handler.go # URL param parsing, concurrent TopN+Trend, template exec
│ ├── filter.go # ParseFilterExpr / FilterExprString mini filter language
│ ├── client.go # gRPC dial helper
│ ├── sparkline.go # TrendPoints → inline SVG polyline
│ ├── format.go # fmtCount (space thousands separator)
│ └── templates/
│ ├── base.html # outer HTML shell, inline CSS, meta-refresh
│ └── index.html # window tabs, group-by tabs, breadcrumb, table, footer
└── cli/
├── main.go # subcommand dispatch and usage
├── flags.go # shared flags, parseTargets, buildFilter, parseWindow
├── client.go # gRPC dial helper
├── format.go # printTable, fmtCount, fmtTime, targetHeader
├── cmd_topn.go # topn: concurrent fan-out, table + JSON output
├── cmd_trend.go # trend: concurrent fan-out, table + JSON output
├── cmd_stream.go # stream: multiplexed streams, auto-reconnect
└── cmd_targets.go # targets: list collectors known to the endpoint
```
## Data Model
The core unit is a **count keyed by four dimensions**:
| Field | Description | Example |
|-------------------|------------------------------------------------------|-------------------|
| `website` | nginx `$host` | `www.example.com` |
| `client_prefix` | client IP truncated to /24 IPv4 or /48 IPv6 | `1.2.3.0/24` |
| `http_request_uri`| `$request_uri` path only — query string stripped | `/api/v1/search` |
| `http_response` | HTTP status code | `429` |
## Time Windows & Tiered Ring Buffers
Two ring buffers at different resolutions cover all query windows up to 24 hours:
| Tier | Bucket size | Buckets | Top-K/bucket | Covers | Roll-up trigger |
|--------|-------------|---------|--------------|--------|---------------------|
| Fine | 1 min | 60 | 50 000 | 1 h | every minute |
| Coarse | 5 min | 288 | 5 000 | 24 h | every 5 fine ticks |
Supported query windows and which tier they read from:
| Window | Tier | Buckets summed |
|--------|--------|----------------|
| 1 min | fine | last 1 |
| 5 min | fine | last 5 |
| 15 min | fine | last 15 |
| 60 min | fine | all 60 |
| 6 h | coarse | last 72 |
| 24 h | coarse | all 288 |
Every minute: snapshot live map → top-50K → append to fine ring, reset live map.
Every 5 minutes: merge last 5 fine snapshots → top-5K → append to coarse ring.
## Memory Budget (Collector, target ≤ 1 GB)
Entry size: ~30 B website + ~15 B prefix + ~50 B URI + 3 B status + 8 B count + ~80 B Go map
overhead ≈ **~186 bytes per entry**.
| Structure | Entries | Size |
|-------------------------|-------------|-------------|
| Live map (capped) | 100 000 | ~19 MB |
| Fine ring (60 × 1-min) | 60 × 50 000 | ~558 MB |
| Coarse ring (288 × 5-min)| 288 × 5 000| ~268 MB |
| **Total** | | **~845 MB** |
The live map is **hard-capped at 100 K entries**. Once full, only updates to existing keys are
accepted; new keys are dropped until the next rotation resets the map. This keeps memory bounded
regardless of attack cardinality.
## Future Work — ClickHouse Export (post-MVP)
> **Do not implement until the end-to-end MVP is running.**
The aggregator will optionally write 1-minute pre-aggregated rows to ClickHouse for 7d/30d
historical views. Schema sketch:
```sql
CREATE TABLE logtail (
ts DateTime,
website LowCardinality(String),
client_prefix String,
request_uri LowCardinality(String),
status UInt16,
count UInt64
) ENGINE = SummingMergeTree(count)
PARTITION BY toYYYYMMDD(ts)
ORDER BY (ts, website, status, client_prefix, request_uri);
```
The frontend routes `window=7d|30d` queries to ClickHouse; all shorter windows continue to use
the in-memory cache. Kafka is not needed — the aggregator writes directly. This is purely additive
and does not change any existing interface.
## Protobuf API (`proto/logtail.proto`)
```protobuf
enum StatusOp { EQ = 0; NE = 1; GT = 2; GE = 3; LT = 4; LE = 5; }
message Filter {
optional string website = 1;
optional string client_prefix = 2;
optional string http_request_uri = 3;
optional int32 http_response = 4;
StatusOp status_op = 5; // comparison operator for http_response
optional string website_regex = 6; // RE2 regex against website
optional string uri_regex = 7; // RE2 regex against http_request_uri
}
enum GroupBy { WEBSITE = 0; CLIENT_PREFIX = 1; REQUEST_URI = 2; HTTP_RESPONSE = 3; }
enum Window { W1M = 0; W5M = 1; W15M = 2; W60M = 3; W6H = 4; W24H = 5; }
message TopNRequest { Filter filter = 1; GroupBy group_by = 2; int32 n = 3; Window window = 4; }
message TopNEntry { string label = 1; int64 count = 2; }
message TopNResponse { repeated TopNEntry entries = 1; string source = 2; }
// Trend: one total count per minute (or 5-min) bucket, for sparklines
message TrendRequest { Filter filter = 1; Window window = 4; }
message TrendPoint { int64 timestamp_unix = 1; int64 count = 2; }
message TrendResponse { repeated TrendPoint points = 1; string source = 2; }
// Streaming: collector pushes a fine snapshot after every minute rotation
message SnapshotRequest {}
message Snapshot {
string source = 1;
int64 timestamp = 2;
repeated TopNEntry entries = 3; // full top-50K for this bucket
}
// Target discovery: list the collectors behind the queried endpoint
message ListTargetsRequest {}
message TargetInfo {
string name = 1; // display name (--source value from the collector)
string addr = 2; // gRPC address; empty string means "this endpoint itself"
}
message ListTargetsResponse { repeated TargetInfo targets = 1; }
service LogtailService {
rpc TopN(TopNRequest) returns (TopNResponse);
rpc Trend(TrendRequest) returns (TrendResponse);
rpc StreamSnapshots(SnapshotRequest) returns (stream Snapshot);
rpc ListTargets(ListTargetsRequest) returns (ListTargetsResponse);
}
// Both collector and aggregator implement LogtailService.
// The aggregator's StreamSnapshots re-streams the merged view.
// ListTargets: aggregator returns all configured collectors; collector returns itself.
```
## Program 1 — Collector
### tailer.go
- **`MultiTailer`**: one shared `fsnotify.Watcher` for all files regardless of count — avoids
the inotify instance limit when tailing hundreds of files.
- On `WRITE` event: read all new lines from that file's `bufio.Reader`.
- On `RENAME`/`REMOVE` (logrotate): drain old fd to EOF, close, start retry-open goroutine with
exponential backoff. Sends the new `*os.File` back via a channel to keep map access single-threaded.
- Emits `LogRecord` structs on a shared buffered channel (capacity 200 K — absorbs ~20 s of peak).
- Accepts paths via `--logs` (comma-separated or glob) and `--logs-file` (one path/glob per line).
### parser.go
- Parses the fixed **logtail** nginx log format — tab-separated, fixed field order, no quoting:
```nginx
log_format logtail '$host\t$remote_addr\t$msec\t$request_method\t$request_uri\t$status\t$body_bytes_sent\t$request_time';
```
| # | Field | Used for |
|---|-------------------|------------------|
| 0 | `$host` | website |
| 1 | `$remote_addr` | client_prefix |
| 2 | `$msec` | (discarded) |
| 3 | `$request_method` | (discarded) |
| 4 | `$request_uri` | http_request_uri |
| 5 | `$status` | http_response |
| 6 | `$body_bytes_sent`| (discarded) |
| 7 | `$request_time` | (discarded) |
- `strings.SplitN(line, "\t", 8)` — ~50 ns/line. No regex.
- `$request_uri`: query string discarded at first `?`.
- `$remote_addr`: truncated to /24 (IPv4) or /48 (IPv6); prefix lengths configurable via flags.
- Lines with fewer than 8 fields are silently skipped.
### store.go
- **Single aggregator goroutine** reads from the channel and updates the live map — no locking on
the hot path. At 10 K lines/s the goroutine uses <1% CPU.
- Live map: `map[Tuple4]int64`, hard-capped at 100 K entries (new keys dropped when full).
- **Minute ticker**: heap-selects top-50K entries, writes snapshot to fine ring, resets live map.
- Every 5 fine ticks: merge last 5 fine snapshots → top-5K → write to coarse ring.
- **TopN query**: RLock ring, sum bucket range, apply filter, group by dimension, heap-select top N.
- **Trend query**: per-bucket filtered sum, returns one `TrendPoint` per bucket.
- **Subscriber fan-out**: per-subscriber buffered channel; `Subscribe`/`Unsubscribe` for streaming.
### server.go
- gRPC server on configurable port (default `:9090`).
- `TopN` and `Trend`: unary, answered from the ring buffer under RLock.
- `StreamSnapshots`: registers a subscriber channel; loops `Recv` on it; 30 s keepalive ticker.
## Program 2 — Aggregator
### subscriber.go
- One goroutine per collector. Dials, calls `StreamSnapshots`, forwards each `Snapshot` to the
merger.
- Reconnects with exponential backoff (100 ms → doubles → cap 30 s).
- After 3 consecutive failures: calls `merger.Zero(addr)` to remove that collector's contribution
from the merged view (prevents stale counts accumulating during outages).
- Resets failure count on first successful `Recv`; logs recovery.
### merger.go
- **Delta strategy**: on each new snapshot from collector X, subtract X's previous entries from
`merged`, add the new entries, store new map. O(snapshot_size) per update — not
O(N_collectors × snapshot_size).
- `Zero(addr)`: subtracts the collector's last-known contribution and deletes its entry — called
when a collector is marked degraded.
### cache.go
- **Tick-based rotation** (1-min ticker, not snapshot-triggered): keeps the aggregator ring aligned
to the same 1-minute cadence as collectors regardless of how many collectors are connected.
- Same tiered ring structure as the collector store; populated from `merger.TopK()` each tick.
- `QueryTopN`, `QueryTrend`, `Subscribe`/`Unsubscribe` — identical interface to collector store.
### registry.go
- **`TargetRegistry`**: `sync.RWMutex`-protected `map[addr → name]`. Initialised with the
configured collector addresses; display names are updated from the `source` field of the first
snapshot received from each collector.
- `Targets()` returns a stable sorted slice of `{name, addr}` pairs for `ListTargets` responses.
### server.go
- Implements `LogtailService` backed by the cache (not live fan-out).
- `StreamSnapshots` re-streams merged fine snapshots; usable by a second-tier aggregator or
monitoring system.
- `ListTargets` returns the current `TargetRegistry` contents — all configured collectors with
their display names and gRPC addresses.
## Program 3 — Frontend
### handler.go
- All filter state in the **URL query string**: `w` (window), `by` (group_by), `f_website`,
`f_prefix`, `f_uri`, `f_status`, `f_website_re`, `f_uri_re`, `n`, `target`. No server-side
session — URLs are shareable and bookmarkable; multiple operators see independent views.
- **Filter expression box**: a `q=` parameter carries a mini filter language
(`status>=400 AND website~=gouda.* AND uri~=^/api/`). On submission the handler parses it
via `ParseFilterExpr` and redirects to the canonical URL with individual `f_*` params; `q=`
never appears in the final URL. Parse errors re-render the current page with an inline message.
- **Status expressions**: `f_status` accepts `200`, `!=200`, `>=400`, `<500`, etc. — parsed by
`store.ParseStatusExpr` into `(value, StatusOp)` for the filter protobuf.
- **Regex filters**: `f_website_re` and `f_uri_re` hold RE2 patterns; compiled once per request
into `store.CompiledFilter` before the query-loop iteration. Invalid regexes match nothing.
- `TopN`, `Trend`, and `ListTargets` RPCs issued **concurrently** (all with a 5 s deadline); page
renders with whatever completes. Trend failure suppresses the sparkline; `ListTargets` failure
hides the source picker — both are non-fatal.
- **Source picker**: `ListTargets` result drives a `source:` tab row. Clicking a collector tab
sets `target=` to that collector's address, querying it directly. The "all" tab resets to the
default aggregator. Picker is hidden when `ListTargets` returns ≤0 collectors (direct collector
mode).
- **Drilldown**: clicking a table row adds the current dimension's filter and advances `by` through
`website → prefix → uri → status → website` (cycles).
- **`raw=1`**: returns the TopN result as JSON — same URL, no CLI needed for scripting.
- **`target=` override**: per-request gRPC endpoint override for comparing sources.
- Error pages render at HTTP 502 with the window/group-by tabs still functional.
### sparkline.go
- `renderSparkline([]*pb.TrendPoint) template.HTML` — fixed `viewBox="0 0 300 60"` SVG,
Y-scaled to max count, rendered as `<polyline>`. Returns `""` for fewer than 2 points or
all-zero data.
### templates/
- `base.html`: outer shell, inline CSS (~40 lines), conditional `<meta http-equiv="refresh">`.
- `index.html`: window tabs, group-by tabs, filter breadcrumb with `×` remove links, sparkline,
TopN table with `<meter>` bars (% relative to rank-1), footer with source and refresh info.
- No external CSS, no web fonts, no JavaScript. Renders in w3m/lynx.
## Program 4 — CLI
### Subcommands
```
logtail-cli topn [flags] ranked label → count table (exits after one response)
logtail-cli trend [flags] per-bucket time series (exits after one response)
logtail-cli stream [flags] live snapshot feed (runs until Ctrl-C, auto-reconnects)
logtail-cli targets [flags] list targets known to the queried endpoint
```
### Flags
**Shared** (all subcommands):
| Flag | Default | Description |
|--------------|------------------|----------------------------------------------------------|
| `--target` | `localhost:9090` | Comma-separated `host:port` list; fan-out to all |
| `--json` | false | Emit newline-delimited JSON instead of a table |
| `--website` | — | Filter: website |
| `--prefix` | — | Filter: client prefix |
| `--uri` | — | Filter: request URI |
| `--status` | — | Filter: HTTP status expression (`200`, `!=200`, `>=400`, `<500`, …) |
| `--website-re`| — | Filter: RE2 regex against website |
| `--uri-re` | — | Filter: RE2 regex against request URI |
**`topn` only**: `--n 10`, `--window 5m`, `--group-by website`
**`trend` only**: `--window 5m`
### Multi-target fan-out
`--target` accepts a comma-separated list. All targets are queried concurrently; results are
printed in order with a per-target header. Single-target output omits the header for clean
pipe-to-`jq` use.
### Output
Default: human-readable table with space-separated thousands (`18 432`).
`--json`: one JSON object per target (NDJSON for `stream`).
`stream` reconnects automatically on error (5 s backoff). All other subcommands exit immediately
with a non-zero code on gRPC error.
## Key Design Decisions
| Decision | Rationale |
|----------|-----------|
| Single aggregator goroutine in collector | Eliminates all map lock contention on the 10 K/s hot path |
| Hard cap live map at 100 K entries | Bounds memory regardless of DDoS cardinality explosion |
| Ring buffer of sorted snapshots (not raw maps) | TopN queries avoid re-sorting; merge is a single heap pass |
| Push-based streaming (collector → aggregator) | Aggregator cache always fresh; query latency is cache-read only |
| Delta merge in aggregator | O(snapshot_size) per update, not O(N_collectors × size) |
| Tick-based cache rotation in aggregator | Ring stays on the same 1-min cadence regardless of collector count |
| Degraded collector zeroing | Stale counts from failed collectors don't accumulate in the merged view |
| Same `LogtailService` for collector and aggregator | CLI and frontend work with either; no special-casing |
| `internal/store` shared package | ~200 lines of ring-buffer logic shared between collector and aggregator |
| Filter state in URL, not session cookie | Multiple concurrent operators; shareable/bookmarkable URLs |
| Query strings stripped at ingest | Major cardinality reduction; prevents URI explosion under attack |
| No persistent storage | Simplicity; acceptable for ops dashboards (restart = lose history) |
| Trusted internal network, no TLS | Reduces operational complexity; add a TLS proxy if needed later |
| Server-side SVG sparklines, meta-refresh | Zero JS dependencies; works in terminal browsers and curl |
| CLI default: human-readable table | Operator-friendly by default; `--json` opt-in for scripting |
| CLI multi-target fan-out | Compare a collector vs. aggregator, or two collectors, in one command |
| CLI uses stdlib `flag`, no framework | Four subcommands don't justify a dependency |
| Status filter as expression string (`!=200`, `>=400`) | Operator-friendly; parsed once at query boundary, encoded as `(int32, StatusOp)` in proto |
| Regex filters compiled once per query (`CompiledFilter`) | Up to 288 × 5 000 per-entry calls — compiling per-entry would dominate query latency |
| Filter expression box (`q=`) redirects to canonical URL | Filter state stays in individual `f_*` params; URLs remain shareable and bookmarkable |
| `ListTargets` + frontend source picker (no Tuple5) | "Which nginx is busiest?" answered by switching `target=` to a collector; no data model changes, no extra memory |

View File

@@ -163,8 +163,8 @@ func TestCacheCoarseRing(t *testing.T) {
func TestCacheQueryTopN(t *testing.T) { func TestCacheQueryTopN(t *testing.T) {
m := NewMerger() m := NewMerger()
m.Apply(makeSnap("c1", map[string]int64{ m.Apply(makeSnap("c1", map[string]int64{
st.EncodeTuple(st.Tuple4{"busy.com", "1.0.0.0/24", "/", "200"}): 300, st.EncodeTuple(st.Tuple6{Website: "busy.com", Prefix: "1.0.0.0/24", URI: "/", Status: "200"}): 300,
st.EncodeTuple(st.Tuple4{"quiet.com", "2.0.0.0/24", "/", "200"}): 50, st.EncodeTuple(st.Tuple6{Website: "quiet.com", Prefix: "2.0.0.0/24", URI: "/", Status: "200"}): 50,
})) }))
cache := NewCache(m, "test") cache := NewCache(m, "test")
@@ -181,8 +181,8 @@ func TestCacheQueryTopN(t *testing.T) {
func TestCacheQueryTopNWithFilter(t *testing.T) { func TestCacheQueryTopNWithFilter(t *testing.T) {
m := NewMerger() m := NewMerger()
status429 := st.EncodeTuple(st.Tuple4{"example.com", "1.0.0.0/24", "/api", "429"}) status429 := st.EncodeTuple(st.Tuple6{Website: "example.com", Prefix: "1.0.0.0/24", URI: "/api", Status: "429"})
status200 := st.EncodeTuple(st.Tuple4{"example.com", "2.0.0.0/24", "/api", "200"}) status200 := st.EncodeTuple(st.Tuple6{Website: "example.com", Prefix: "2.0.0.0/24", URI: "/api", Status: "200"})
m.Apply(makeSnap("c1", map[string]int64{status429: 200, status200: 500})) m.Apply(makeSnap("c1", map[string]int64{status429: 200, status200: 500}))
cache := NewCache(m, "test") cache := NewCache(m, "test")
@@ -202,7 +202,7 @@ func TestCacheQueryTrend(t *testing.T) {
for i, count := range []int64{10, 20, 30} { for i, count := range []int64{10, 20, 30} {
m.Apply(makeSnap("c1", map[string]int64{ m.Apply(makeSnap("c1", map[string]int64{
st.EncodeTuple(st.Tuple4{"x.com", "1.0.0.0/24", "/", "200"}): count, st.EncodeTuple(st.Tuple6{Website: "x.com", Prefix: "1.0.0.0/24", URI: "/", Status: "200"}): count,
})) }))
cache.rotate(now.Add(time.Duration(i) * time.Minute)) cache.rotate(now.Add(time.Duration(i) * time.Minute))
} }
@@ -270,12 +270,12 @@ func startFakeCollector(t *testing.T, snaps []*pb.Snapshot) string {
func TestGRPCEndToEnd(t *testing.T) { func TestGRPCEndToEnd(t *testing.T) {
// Two fake collectors with overlapping labels. // Two fake collectors with overlapping labels.
snap1 := makeSnap("col1", map[string]int64{ snap1 := makeSnap("col1", map[string]int64{
st.EncodeTuple(st.Tuple4{"busy.com", "1.0.0.0/24", "/", "200"}): 500, st.EncodeTuple(st.Tuple6{Website: "busy.com", Prefix: "1.0.0.0/24", URI: "/", Status: "200"}): 500,
st.EncodeTuple(st.Tuple4{"quiet.com", "2.0.0.0/24", "/", "429"}): 100, st.EncodeTuple(st.Tuple6{Website: "quiet.com", Prefix: "2.0.0.0/24", URI: "/", Status: "429"}): 100,
}) })
snap2 := makeSnap("col2", map[string]int64{ snap2 := makeSnap("col2", map[string]int64{
st.EncodeTuple(st.Tuple4{"busy.com", "3.0.0.0/24", "/", "200"}): 300, st.EncodeTuple(st.Tuple6{Website: "busy.com", Prefix: "3.0.0.0/24", URI: "/", Status: "200"}): 300,
st.EncodeTuple(st.Tuple4{"other.com", "4.0.0.0/24", "/", "200"}): 50, st.EncodeTuple(st.Tuple6{Website: "other.com", Prefix: "4.0.0.0/24", URI: "/", Status: "200"}): 50,
}) })
addr1 := startFakeCollector(t, []*pb.Snapshot{snap1}) addr1 := startFakeCollector(t, []*pb.Snapshot{snap1})
addr2 := startFakeCollector(t, []*pb.Snapshot{snap2}) addr2 := startFakeCollector(t, []*pb.Snapshot{snap2})
@@ -388,7 +388,7 @@ func TestGRPCEndToEnd(t *testing.T) {
func TestDegradedCollector(t *testing.T) { func TestDegradedCollector(t *testing.T) {
// Start one real and one immediately-gone collector. // Start one real and one immediately-gone collector.
snap1 := makeSnap("col1", map[string]int64{ snap1 := makeSnap("col1", map[string]int64{
st.EncodeTuple(st.Tuple4{"good.com", "1.0.0.0/24", "/", "200"}): 100, st.EncodeTuple(st.Tuple6{Website: "good.com", Prefix: "1.0.0.0/24", URI: "/", Status: "200"}): 100,
}) })
addr1 := startFakeCollector(t, []*pb.Snapshot{snap1}) addr1 := startFakeCollector(t, []*pb.Snapshot{snap1})
// addr2 points at nothing — connections will fail immediately. // addr2 points at nothing — connections will fail immediately.

165
cmd/aggregator/backfill.go Normal file
View File

@@ -0,0 +1,165 @@
package main
import (
"context"
"io"
"log"
"sort"
"time"
st "git.ipng.ch/ipng/nginx-logtail/internal/store"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
)
// Backfill calls DumpSnapshots on all collectors concurrently, merges their
// data per timestamp, and loads the result into the cache. It blocks until all
// collectors have responded or the context is cancelled.
func Backfill(ctx context.Context, collectorAddrs []string, cache *Cache) {
	type result struct {
		fine   []st.Snapshot
		coarse []st.Snapshot
	}
	results := make(chan result, len(collectorAddrs))

	// Fan out: one dump RPC per collector. A failed collector degrades to an
	// empty result, so the fan-in loop below always receives exactly one
	// message per collector and never blocks forever.
	for _, a := range collectorAddrs {
		go func(addr string) {
			begin := time.Now()
			fine, coarse, err := dumpCollector(ctx, addr)
			if err != nil {
				if status.Code(err) == codes.Unimplemented {
					log.Printf("backfill: %s: collector does not support DumpSnapshots (old binary), skipping", addr)
				} else {
					log.Printf("backfill: %s: failed after %s: %v", addr, time.Since(begin).Round(time.Millisecond), err)
				}
				results <- result{}
				return
			}
			fineEntries := 0
			coarseEntries := 0
			for _, s := range fine {
				fineEntries += len(s.Entries)
			}
			for _, s := range coarse {
				coarseEntries += len(s.Entries)
			}
			log.Printf("backfill: %s: %d fine buckets (%d entries) + %d coarse buckets (%d entries) in %s",
				addr, len(fine), fineEntries, len(coarse), coarseEntries, time.Since(begin).Round(time.Millisecond))
			results <- result{fine, coarse}
		}(a)
	}

	// Fan in: accumulate per-timestamp label counts across all collectors.
	// Keys are unix seconds truncated to the ring's bucket granularity.
	fineByTS := make(map[int64]map[string]int64)
	coarseByTS := make(map[int64]map[string]int64)
	for range collectorAddrs {
		r := <-results
		mergeDump(r.fine, fineByTS, time.Minute)
		mergeDump(r.coarse, coarseByTS, 5*time.Minute)
	}

	mergeStart := time.Now()
	fine := buildSnapshots(fineByTS, st.FineTopK, st.FineRingSize)
	coarse := buildSnapshots(coarseByTS, st.CoarseTopK, st.CoarseRingSize)
	log.Printf("backfill: merge+topk took %s", time.Since(mergeStart).Round(time.Microsecond))

	if len(fine) == 0 && len(coarse) == 0 {
		log.Printf("backfill: no data received from any collector")
		return
	}

	loadStart := time.Now()
	cache.LoadHistorical(fine, coarse)
	log.Printf("backfill: loaded %d fine + %d coarse buckets in %s",
		len(fine), len(coarse), time.Since(loadStart).Round(time.Microsecond))
}
// dumpCollector calls DumpSnapshots on one collector and returns the fine and
// coarse ring snapshots as separate slices.
func dumpCollector(ctx context.Context, addr string) (fine, coarse []st.Snapshot, err error) {
	conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, nil, err
	}
	defer conn.Close()

	stream, err := pb.NewLogtailServiceClient(conn).DumpSnapshots(ctx, &pb.DumpSnapshotsRequest{})
	if err != nil {
		return nil, nil, err
	}
	// Drain the stream until EOF; each message is one ring bucket tagged
	// fine or coarse. On a mid-stream error the partial data is returned
	// alongside the error.
	for {
		msg, recvErr := stream.Recv()
		if recvErr == io.EOF {
			return fine, coarse, nil
		}
		if recvErr != nil {
			return fine, coarse, recvErr
		}
		snap := st.Snapshot{
			Timestamp: time.Unix(msg.Timestamp, 0),
			Entries:   pbEntriesToStore(msg.Entries),
		}
		if msg.IsCoarse {
			coarse = append(coarse, snap)
		} else {
			fine = append(fine, snap)
		}
	}
}
// mergeDump adds all snapshots from one collector's dump into the per-timestamp
// accumulator map. Multiple collectors' entries for the same timestamp are summed.
// granularity should match the ring bucket size (time.Minute for fine, 5*time.Minute for coarse).
func mergeDump(snaps []st.Snapshot, byTS map[int64]map[string]int64, granularity time.Duration) {
	for _, snap := range snaps {
		// Pin the bucket to the ring cadence so slightly-skewed collector
		// timestamps land in the same accumulator entry.
		bucket := snap.Timestamp.Truncate(granularity).Unix()
		counts, ok := byTS[bucket]
		if !ok {
			counts = make(map[string]int64, len(snap.Entries))
			byTS[bucket] = counts
		}
		for _, entry := range snap.Entries {
			counts[entry.Label] += entry.Count
		}
	}
}
// buildSnapshots sorts the per-timestamp map chronologically, runs TopK on each
// bucket, and returns a slice capped to ringSize oldest-first snapshots.
func buildSnapshots(byTS map[int64]map[string]int64, topK, ringSize int) []st.Snapshot {
	if len(byTS) == 0 {
		return nil
	}
	keys := make([]int64, 0, len(byTS))
	for ts := range byTS {
		keys = append(keys, ts)
	}
	sort.Slice(keys, func(a, b int) bool { return keys[a] < keys[b] })
	// Drop the oldest buckets so at most ringSize remain.
	if extra := len(keys) - ringSize; extra > 0 {
		keys = keys[extra:]
	}
	out := make([]st.Snapshot, 0, len(keys))
	for _, ts := range keys {
		out = append(out, st.Snapshot{
			Timestamp: time.Unix(ts, 0),
			Entries:   st.TopKFromMap(byTS[ts], topK),
		})
	}
	return out
}
// pbEntriesToStore converts proto TopN entries into the store's Entry slice.
func pbEntriesToStore(entries []*pb.TopNEntry) []st.Entry {
	converted := make([]st.Entry, 0, len(entries))
	for _, e := range entries {
		converted = append(converted, st.Entry{Label: e.Label, Count: e.Count})
	}
	return converted
}

View File

@@ -90,6 +90,26 @@ func (c *Cache) mergeFineBuckets(now time.Time) st.Snapshot {
return st.Snapshot{Timestamp: now, Entries: st.TopKFromMap(merged, st.CoarseTopK)} return st.Snapshot{Timestamp: now, Entries: st.TopKFromMap(merged, st.CoarseTopK)}
} }
// LoadHistorical pre-populates the ring buffers from backfill data before live
// streaming begins. fine and coarse must be sorted oldest-first; each slice
// must not exceed the respective ring size. Called once at startup, before Run.
func (c *Cache) LoadHistorical(fine, coarse []st.Snapshot) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Fill the fine ring from index 0. Per the contract above, fine is
	// sorted oldest-first and never exceeds st.FineRingSize, so no
	// wrap-around handling is needed here.
	for i, snap := range fine {
		c.fineRing[i] = snap
	}
	c.fineFilled = len(fine)
	// Next write position; the modulo wraps head back to 0 when the ring
	// was filled exactly to capacity.
	c.fineHead = len(fine) % st.FineRingSize
	// Same treatment for the coarse ring.
	for i, snap := range coarse {
		c.coarseRing[i] = snap
	}
	c.coarseFilled = len(coarse)
	c.coarseHead = len(coarse) % st.CoarseRingSize
}
// QueryTopN answers a TopN request from the ring buffers. // QueryTopN answers a TopN request from the ring buffers.
func (c *Cache) QueryTopN(filter *pb.Filter, groupBy pb.GroupBy, n int, window pb.Window) []st.Entry { func (c *Cache) QueryTopN(filter *pb.Filter, groupBy pb.GroupBy, n int, window pb.Window) []st.Entry {
cf := st.CompileFilter(filter) cf := st.CompileFilter(filter)

View File

@@ -15,13 +15,13 @@ import (
) )
func main() { func main() {
listen := flag.String("listen", ":9091", "gRPC listen address") listen := flag.String("listen", envOr("AGGREGATOR_LISTEN", ":9091"), "gRPC listen address (env: AGGREGATOR_LISTEN)")
collectors := flag.String("collectors", "", "comma-separated collector host:port addresses") collectors := flag.String("collectors", envOr("AGGREGATOR_COLLECTORS", ""), "comma-separated collector host:port addresses (env: AGGREGATOR_COLLECTORS)")
source := flag.String("source", hostname(), "name for this aggregator in responses") source := flag.String("source", envOr("AGGREGATOR_SOURCE", hostname()), "name for this aggregator in responses (env: AGGREGATOR_SOURCE, default: hostname)")
flag.Parse() flag.Parse()
if *collectors == "" { if *collectors == "" {
log.Fatal("aggregator: --collectors is required") log.Fatal("aggregator: --collectors / AGGREGATOR_COLLECTORS is required")
} }
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
@@ -38,13 +38,6 @@ func main() {
merger := NewMerger() merger := NewMerger()
cache := NewCache(merger, *source) cache := NewCache(merger, *source)
registry := NewTargetRegistry(collectorAddrs) registry := NewTargetRegistry(collectorAddrs)
go cache.Run(ctx)
for _, addr := range collectorAddrs {
sub := NewCollectorSub(addr, merger, registry)
go sub.Run(ctx)
log.Printf("aggregator: subscribing to collector %s", addr)
}
lis, err := net.Listen("tcp", *listen) lis, err := net.Listen("tcp", *listen)
if err != nil { if err != nil {
@@ -60,6 +53,17 @@ func main() {
} }
}() }()
go cache.Run(ctx)
for _, addr := range collectorAddrs {
sub := NewCollectorSub(addr, merger, registry)
go sub.Run(ctx)
log.Printf("aggregator: subscribing to collector %s", addr)
}
log.Printf("aggregator: backfilling from %d collector(s)", len(collectorAddrs))
go Backfill(ctx, collectorAddrs, cache)
<-ctx.Done() <-ctx.Done()
log.Printf("aggregator: shutting down") log.Printf("aggregator: shutting down")
grpcServer.GracefulStop() grpcServer.GracefulStop()
@@ -72,3 +76,10 @@ func hostname() string {
} }
return h return h
} }
// envOr returns the value of the environment variable key, or def when the
// variable is unset or empty.
func envOr(key, def string) string {
	v := os.Getenv(key)
	if v == "" {
		return def
	}
	return v
}

View File

@@ -8,6 +8,7 @@ import (
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb" pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
@@ -64,15 +65,23 @@ func (srv *Server) ListTargets(_ context.Context, _ *pb.ListTargetsRequest) (*pb
return resp, nil return resp, nil
} }
// peerAddr reports the remote address of the gRPC peer attached to ctx,
// or "unknown" when no peer information is present.
func peerAddr(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return "unknown"
	}
	return p.Addr.String()
}
func (srv *Server) StreamSnapshots(_ *pb.SnapshotRequest, stream grpc.ServerStreamingServer[pb.Snapshot]) error { func (srv *Server) StreamSnapshots(_ *pb.SnapshotRequest, stream grpc.ServerStreamingServer[pb.Snapshot]) error {
ch := srv.cache.Subscribe() ch := srv.cache.Subscribe()
defer srv.cache.Unsubscribe(ch) defer srv.cache.Unsubscribe(ch)
log.Printf("server: new StreamSnapshots subscriber") addr := peerAddr(stream.Context())
log.Printf("server: new StreamSnapshots subscriber from %s", addr)
for { for {
select { select {
case <-stream.Context().Done(): case <-stream.Context().Done():
log.Printf("server: StreamSnapshots subscriber disconnected") log.Printf("server: StreamSnapshots subscriber disconnected: %s", addr)
return nil return nil
case snap, ok := <-ch: case snap, ok := <-ch:
if !ok { if !ok {

View File

@@ -89,7 +89,10 @@ func TestBuildFilterNil(t *testing.T) {
} }
func TestFmtCount(t *testing.T) { func TestFmtCount(t *testing.T) {
cases := []struct{ n int64; want string }{ cases := []struct {
n int64
want string
}{
{0, "0"}, {0, "0"},
{999, "999"}, {999, "999"},
{1000, "1 000"}, {1000, "1 000"},

View File

@@ -33,6 +33,10 @@ func runTopN(args []string) {
results := fanOutTopN(sf.targets, filter, grp, *n, win) results := fanOutTopN(sf.targets, filter, grp, *n, win)
if sf.jsonOut {
printTopNJSONArray(results)
return
}
for _, r := range results { for _, r := range results {
if hdr := targetHeader(r.target, r.resp.GetSource(), len(sf.targets)); hdr != "" { if hdr := targetHeader(r.target, r.resp.GetSource(), len(sf.targets)); hdr != "" {
fmt.Println(hdr) fmt.Println(hdr)
@@ -41,11 +45,7 @@ func runTopN(args []string) {
fmt.Fprintf(os.Stderr, "error from %s: %v\n", r.target, r.err) fmt.Fprintf(os.Stderr, "error from %s: %v\n", r.target, r.err)
continue continue
} }
if sf.jsonOut { printTopNTable(r)
printTopNJSON(r)
} else {
printTopNTable(r)
}
if len(sf.targets) > 1 { if len(sf.targets) > 1 {
fmt.Println() fmt.Println()
} }
@@ -99,7 +99,7 @@ func printTopNTable(r topNResult) {
printTable(os.Stdout, rows) printTable(os.Stdout, rows)
} }
func printTopNJSON(r topNResult) { func printTopNJSONArray(results []topNResult) {
type entry struct { type entry struct {
Label string `json:"label"` Label string `json:"label"`
Count int64 `json:"count"` Count int64 `json:"count"`
@@ -109,14 +109,22 @@ func printTopNJSON(r topNResult) {
Target string `json:"target"` Target string `json:"target"`
Entries []entry `json:"entries"` Entries []entry `json:"entries"`
} }
o := out{ rows := make([]out, 0, len(results))
Source: r.resp.Source, for _, r := range results {
Target: r.target, if r.err != nil {
Entries: make([]entry, len(r.resp.Entries)), fmt.Fprintf(os.Stderr, "error from %s: %v\n", r.target, r.err)
continue
}
o := out{
Source: r.resp.Source,
Target: r.target,
Entries: make([]entry, len(r.resp.Entries)),
}
for i, e := range r.resp.Entries {
o.Entries[i] = entry{Label: e.Label, Count: e.Count}
}
rows = append(rows, o)
} }
for i, e := range r.resp.Entries { b, _ := json.Marshal(rows)
o.Entries[i] = entry{Label: e.Label, Count: e.Count}
}
b, _ := json.Marshal(o)
fmt.Println(string(b)) fmt.Println(string(b))
} }

View File

@@ -30,6 +30,10 @@ func runTrend(args []string) {
results := fanOutTrend(sf.targets, filter, win) results := fanOutTrend(sf.targets, filter, win)
if sf.jsonOut {
printTrendJSONArray(results)
return
}
for _, r := range results { for _, r := range results {
if hdr := targetHeader(r.target, r.resp.GetSource(), len(sf.targets)); hdr != "" { if hdr := targetHeader(r.target, r.resp.GetSource(), len(sf.targets)); hdr != "" {
fmt.Println(hdr) fmt.Println(hdr)
@@ -38,11 +42,7 @@ func runTrend(args []string) {
fmt.Fprintf(os.Stderr, "error from %s: %v\n", r.target, r.err) fmt.Fprintf(os.Stderr, "error from %s: %v\n", r.target, r.err)
continue continue
} }
if sf.jsonOut { printTrendTable(r)
printTrendJSON(r)
} else {
printTrendTable(r)
}
if len(sf.targets) > 1 { if len(sf.targets) > 1 {
fmt.Println() fmt.Println()
} }
@@ -90,7 +90,7 @@ func printTrendTable(r trendResult) {
printTable(os.Stdout, rows) printTable(os.Stdout, rows)
} }
func printTrendJSON(r trendResult) { func printTrendJSONArray(results []trendResult) {
type point struct { type point struct {
Ts int64 `json:"ts"` Ts int64 `json:"ts"`
Count int64 `json:"count"` Count int64 `json:"count"`
@@ -100,14 +100,22 @@ func printTrendJSON(r trendResult) {
Target string `json:"target"` Target string `json:"target"`
Points []point `json:"points"` Points []point `json:"points"`
} }
o := out{ rows := make([]out, 0, len(results))
Source: r.resp.Source, for _, r := range results {
Target: r.target, if r.err != nil {
Points: make([]point, len(r.resp.Points)), fmt.Fprintf(os.Stderr, "error from %s: %v\n", r.target, r.err)
continue
}
o := out{
Source: r.resp.Source,
Target: r.target,
Points: make([]point, len(r.resp.Points)),
}
for i, p := range r.resp.Points {
o.Points[i] = point{Ts: p.TimestampUnix, Count: p.Count}
}
rows = append(rows, o)
} }
for i, p := range r.resp.Points { b, _ := json.Marshal(rows)
o.Points[i] = point{Ts: p.TimestampUnix, Count: p.Count}
}
b, _ := json.Marshal(o)
fmt.Println(string(b)) fmt.Println(string(b))
} }

View File

@@ -12,14 +12,18 @@ import (
// sharedFlags holds the flags common to every subcommand. // sharedFlags holds the flags common to every subcommand.
type sharedFlags struct { type sharedFlags struct {
targets []string targets []string
jsonOut bool jsonOut bool
website string website string
prefix string prefix string
uri string uri string
status string // expression: "200", "!=200", ">=400", etc. status string // expression: "200", "!=200", ">=400", etc.
websiteRe string // RE2 regex against website websiteRe string // RE2 regex against website
uriRe string // RE2 regex against request URI uriRe string // RE2 regex against request URI
websiteReNeg string // RE2 regex exclusion against website
uriReNeg string // RE2 regex exclusion against request URI
isTor string // "", "1" / "!=0" (TOR only), "0" / "!=1" (non-TOR only)
asn string // expression: "12345", "!=65000", ">=1000", etc.
} }
// bindShared registers the shared flags on fs and returns a pointer to the // bindShared registers the shared flags on fs and returns a pointer to the
@@ -34,6 +38,10 @@ func bindShared(fs *flag.FlagSet) (*sharedFlags, *string) {
fs.StringVar(&sf.status, "status", "", "filter: HTTP status expression (200, !=200, >=400, <500, …)") fs.StringVar(&sf.status, "status", "", "filter: HTTP status expression (200, !=200, >=400, <500, …)")
fs.StringVar(&sf.websiteRe, "website-re", "", "filter: RE2 regex against website") fs.StringVar(&sf.websiteRe, "website-re", "", "filter: RE2 regex against website")
fs.StringVar(&sf.uriRe, "uri-re", "", "filter: RE2 regex against request URI") fs.StringVar(&sf.uriRe, "uri-re", "", "filter: RE2 regex against request URI")
fs.StringVar(&sf.websiteReNeg, "website-re-neg", "", "filter: RE2 regex exclusion against website")
fs.StringVar(&sf.uriReNeg, "uri-re-neg", "", "filter: RE2 regex exclusion against request URI")
fs.StringVar(&sf.isTor, "is-tor", "", "filter: TOR traffic (1 or !=0 = TOR only; 0 or !=1 = non-TOR only)")
fs.StringVar(&sf.asn, "asn", "", "filter: ASN expression (12345, !=65000, >=1000, <64512, …)")
return sf, target return sf, target
} }
@@ -56,7 +64,7 @@ func parseTargets(s string) []string {
} }
func buildFilter(sf *sharedFlags) *pb.Filter { func buildFilter(sf *sharedFlags) *pb.Filter {
if sf.website == "" && sf.prefix == "" && sf.uri == "" && sf.status == "" && sf.websiteRe == "" && sf.uriRe == "" { if sf.website == "" && sf.prefix == "" && sf.uri == "" && sf.status == "" && sf.websiteRe == "" && sf.uriRe == "" && sf.websiteReNeg == "" && sf.uriReNeg == "" && sf.isTor == "" && sf.asn == "" {
return nil return nil
} }
f := &pb.Filter{} f := &pb.Filter{}
@@ -84,6 +92,32 @@ func buildFilter(sf *sharedFlags) *pb.Filter {
if sf.uriRe != "" { if sf.uriRe != "" {
f.UriRegex = &sf.uriRe f.UriRegex = &sf.uriRe
} }
if sf.websiteReNeg != "" {
f.WebsiteRegexExclude = &sf.websiteReNeg
}
if sf.uriReNeg != "" {
f.UriRegexExclude = &sf.uriReNeg
}
switch sf.isTor {
case "1", "!=0":
f.Tor = pb.TorFilter_TOR_YES
case "0", "!=1":
f.Tor = pb.TorFilter_TOR_NO
case "":
// no filter
default:
fmt.Fprintf(os.Stderr, "--is-tor: invalid value %q; use 1, 0, !=0, or !=1\n", sf.isTor)
os.Exit(1)
}
if sf.asn != "" {
n, op, ok := st.ParseStatusExpr(sf.asn)
if !ok {
fmt.Fprintf(os.Stderr, "--asn: invalid expression %q; use e.g. 12345, !=65000, >=1000, <64512\n", sf.asn)
os.Exit(1)
}
f.AsnNumber = &n
f.AsnOp = op
}
return f return f
} }

View File

@@ -6,35 +6,35 @@ import (
"flag" "flag"
"log" "log"
"net" "net"
"net/http"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"syscall" "syscall"
"time"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb" pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
func main() { func main() {
listen := flag.String("listen", ":9090", "gRPC listen address") listen := flag.String("listen", envOr("COLLECTOR_LISTEN", ":9090"), "gRPC listen address (env: COLLECTOR_LISTEN)")
logPaths := flag.String("logs", "", "comma-separated log file paths/globs to tail") promListen := flag.String("prom-listen", envOr("COLLECTOR_PROM_LISTEN", ":9100"), "Prometheus metrics listen address, empty to disable (env: COLLECTOR_PROM_LISTEN)")
logsFile := flag.String("logs-file", "", "file containing one log path/glob per line") logPaths := flag.String("logs", envOr("COLLECTOR_LOGS", ""), "comma-separated log file paths/globs to tail (env: COLLECTOR_LOGS)")
source := flag.String("source", hostname(), "name for this collector (default: hostname)") logsFile := flag.String("logs-file", envOr("COLLECTOR_LOGS_FILE", ""), "file containing one log path/glob per line (env: COLLECTOR_LOGS_FILE)")
v4prefix := flag.Int("v4prefix", 24, "IPv4 prefix length for client bucketing") source := flag.String("source", envOr("COLLECTOR_SOURCE", hostname()), "name for this collector (env: COLLECTOR_SOURCE, default: hostname)")
v6prefix := flag.Int("v6prefix", 48, "IPv6 prefix length for client bucketing") v4prefix := flag.Int("v4prefix", envOrInt("COLLECTOR_V4PREFIX", 24), "IPv4 prefix length for client bucketing (env: COLLECTOR_V4PREFIX)")
v6prefix := flag.Int("v6prefix", envOrInt("COLLECTOR_V6PREFIX", 48), "IPv6 prefix length for client bucketing (env: COLLECTOR_V6PREFIX)")
scanInterval := flag.Duration("scan-interval", envOrDuration("COLLECTOR_SCAN_INTERVAL", 10*time.Second), "how often to rescan glob patterns for new/removed files (env: COLLECTOR_SCAN_INTERVAL)")
flag.Parse() flag.Parse()
patterns := collectPatterns(*logPaths, *logsFile) patterns := collectPatterns(*logPaths, *logsFile)
if len(patterns) == 0 { if len(patterns) == 0 {
log.Fatal("collector: no log paths specified; use --logs or --logs-file") log.Fatal("collector: no log paths specified; use --logs or --logs-file")
} }
log.Printf("collector: watching %d pattern(s), rescan every %s", len(patterns), *scanInterval)
paths := expandGlobs(patterns)
if len(paths) == 0 {
log.Fatal("collector: no log files matched the specified patterns")
}
log.Printf("collector: tailing %d file(s)", len(paths))
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop() defer stop()
@@ -43,9 +43,21 @@ func main() {
ch := make(chan LogRecord, 200_000) ch := make(chan LogRecord, 200_000)
store := NewStore(*source) store := NewStore(*source)
if *promListen != "" {
ps := NewPromStore()
store.prom = ps
mux := http.NewServeMux()
mux.Handle("/metrics", ps)
go func() {
log.Printf("collector: Prometheus metrics on %s/metrics", *promListen)
if err := http.ListenAndServe(*promListen, mux); err != nil {
log.Fatalf("collector: Prometheus server: %v", err)
}
}()
}
go store.Run(ch) go store.Run(ch)
tailer := NewMultiTailer(paths, *v4prefix, *v6prefix, ch) tailer := NewMultiTailer(patterns, *scanInterval, *v4prefix, *v6prefix, ch)
go tailer.Run(ctx) go tailer.Run(ctx)
lis, err := net.Listen("tcp", *listen) lis, err := net.Listen("tcp", *listen)
@@ -64,7 +76,22 @@ func main() {
<-ctx.Done() <-ctx.Done()
log.Printf("collector: shutting down") log.Printf("collector: shutting down")
grpcServer.GracefulStop()
// GracefulStop waits for all RPCs to finish. StreamSnapshots subscribers
// (e.g. the aggregator) hold a stream open indefinitely, so we give it a
// short window and then force-stop to avoid hanging systemctl stop/restart.
stopped := make(chan struct{})
go func() {
grpcServer.GracefulStop()
close(stopped)
}()
select {
case <-stopped:
case <-time.After(5 * time.Second):
log.Printf("collector: graceful stop timed out, forcing stop")
grpcServer.Stop()
}
close(ch) close(ch)
} }
@@ -128,3 +155,30 @@ func hostname() string {
} }
return h return h
} }
// envOr returns the value of the environment variable key, or def when the
// variable is unset or empty.
func envOr(key, def string) string {
	if v, found := os.LookupEnv(key); found && v != "" {
		return v
	}
	return def
}
// envOrInt returns the integer value of the environment variable key, or def
// when the variable is unset. A set-but-unparsable value is logged and falls
// back to def.
func envOrInt(key string, def int) int {
	v := os.Getenv(key)
	if v == "" {
		return def
	}
	n, err := strconv.Atoi(v)
	if err != nil {
		log.Printf("collector: invalid int for %s=%q, using default %d", key, v, def)
		return def
	}
	return n
}
// envOrDuration returns the duration value of the environment variable key, or
// def when the variable is unset. A set-but-unparsable value is logged and
// falls back to def.
func envOrDuration(key string, def time.Duration) time.Duration {
	v := os.Getenv(key)
	if v == "" {
		return def
	}
	d, err := time.ParseDuration(v)
	if err != nil {
		log.Printf("collector: invalid duration for %s=%q, using default %s", key, v, def)
		return def
	}
	return d
}

View File

@@ -3,25 +3,34 @@ package main
import ( import (
"fmt" "fmt"
"net" "net"
"strconv"
"strings" "strings"
) )
// LogRecord holds the four dimensions extracted from a single nginx log line. // LogRecord holds the dimensions extracted from a single nginx log line.
type LogRecord struct { type LogRecord struct {
Website string Website string
ClientPrefix string ClientPrefix string
URI string URI string
Status string Status string
IsTor bool
ASN int32
Method string
BodyBytesSent int64
RequestTime float64
} }
// ParseLine parses a tab-separated logtail log line: // ParseLine parses a tab-separated logtail log line:
// //
// $host \t $remote_addr \t $msec \t $request_method \t $request_uri \t $status \t $body_bytes_sent \t $request_time // $host \t $remote_addr \t $msec \t $request_method \t $request_uri \t $status \t $body_bytes_sent \t $request_time \t $is_tor \t $asn
// //
// The is_tor (field 9) and asn (field 10) fields are optional for backward
// compatibility with older log files that omit them; they default to false/0
// when absent.
// Returns false for lines with fewer than 8 fields. // Returns false for lines with fewer than 8 fields.
func ParseLine(line string, v4bits, v6bits int) (LogRecord, bool) { func ParseLine(line string, v4bits, v6bits int) (LogRecord, bool) {
// SplitN caps allocations; we need exactly 8 fields. // SplitN caps allocations; we need up to 10 fields.
fields := strings.SplitN(line, "\t", 8) fields := strings.SplitN(line, "\t", 10)
if len(fields) < 8 { if len(fields) < 8 {
return LogRecord{}, false return LogRecord{}, false
} }
@@ -36,11 +45,35 @@ func ParseLine(line string, v4bits, v6bits int) (LogRecord, bool) {
return LogRecord{}, false return LogRecord{}, false
} }
isTor := len(fields) >= 9 && fields[8] == "1"
var asn int32
if len(fields) == 10 {
if n, err := strconv.ParseInt(fields[9], 10, 32); err == nil {
asn = int32(n)
}
}
var bodyBytes int64
if n, err := strconv.ParseInt(fields[6], 10, 64); err == nil {
bodyBytes = n
}
var reqTime float64
if f, err := strconv.ParseFloat(fields[7], 64); err == nil {
reqTime = f
}
return LogRecord{ return LogRecord{
Website: fields[0], Website: fields[0],
ClientPrefix: prefix, ClientPrefix: prefix,
URI: uri, URI: uri,
Status: fields[5], Status: fields[5],
IsTor: isTor,
ASN: asn,
Method: fields[3],
BodyBytesSent: bodyBytes,
RequestTime: reqTime,
}, true }, true
} }

View File

@@ -8,20 +8,23 @@ func TestParseLine(t *testing.T) {
good := "www.example.com\t1.2.3.4\t1741954800.123\tGET\t/api/v1/search?q=foo&x=1\t200\t1452\t0.043" good := "www.example.com\t1.2.3.4\t1741954800.123\tGET\t/api/v1/search?q=foo&x=1\t200\t1452\t0.043"
tests := []struct { tests := []struct {
name string name string
line string line string
wantOK bool wantOK bool
want LogRecord want LogRecord
}{ }{
{ {
name: "normal IPv4 line strips query string", name: "normal IPv4 line strips query string",
line: good, line: good,
wantOK: true, wantOK: true,
want: LogRecord{ want: LogRecord{
Website: "www.example.com", Website: "www.example.com",
ClientPrefix: "1.2.3.0/24", ClientPrefix: "1.2.3.0/24",
URI: "/api/v1/search", URI: "/api/v1/search",
Status: "200", Status: "200",
Method: "GET",
BodyBytesSent: 1452,
RequestTime: 0.043,
}, },
}, },
{ {
@@ -33,6 +36,8 @@ func TestParseLine(t *testing.T) {
ClientPrefix: "10.0.0.0/24", ClientPrefix: "10.0.0.0/24",
URI: "/submit", URI: "/submit",
Status: "201", Status: "201",
Method: "POST",
RequestTime: 0.001,
}, },
}, },
{ {
@@ -44,6 +49,8 @@ func TestParseLine(t *testing.T) {
ClientPrefix: "2001:db8:cafe::/48", // /48 = 3 full 16-bit groups intact ClientPrefix: "2001:db8:cafe::/48", // /48 = 3 full 16-bit groups intact
URI: "/", URI: "/",
Status: "200", Status: "200",
Method: "GET",
RequestTime: 0.001,
}, },
}, },
{ {
@@ -70,6 +77,110 @@ func TestParseLine(t *testing.T) {
ClientPrefix: "5.6.7.0/24", ClientPrefix: "5.6.7.0/24",
URI: "/rate-limited", URI: "/rate-limited",
Status: "429", Status: "429",
Method: "GET",
RequestTime: 0.001,
},
},
{
name: "is_tor=1 sets IsTor true",
line: "tor.example.com\t1.2.3.4\t0\tGET\t/\t200\t0\t0.001\t1",
wantOK: true,
want: LogRecord{
Website: "tor.example.com",
ClientPrefix: "1.2.3.0/24",
URI: "/",
Status: "200",
IsTor: true,
Method: "GET",
RequestTime: 0.001,
},
},
{
name: "is_tor=0 sets IsTor false",
line: "normal.example.com\t1.2.3.4\t0\tGET\t/\t200\t0\t0.001\t0",
wantOK: true,
want: LogRecord{
Website: "normal.example.com",
ClientPrefix: "1.2.3.0/24",
URI: "/",
Status: "200",
IsTor: false,
Method: "GET",
RequestTime: 0.001,
},
},
{
name: "missing is_tor field defaults to false (backward compat)",
line: "old.example.com\t1.2.3.4\t0\tGET\t/\t200\t0\t0.001",
wantOK: true,
want: LogRecord{
Website: "old.example.com",
ClientPrefix: "1.2.3.0/24",
URI: "/",
Status: "200",
IsTor: false,
Method: "GET",
RequestTime: 0.001,
},
},
{
name: "asn field parsed",
line: "asn.example.com\t1.2.3.4\t0\tGET\t/\t200\t0\t0.001\t0\t12345",
wantOK: true,
want: LogRecord{
Website: "asn.example.com",
ClientPrefix: "1.2.3.0/24",
URI: "/",
Status: "200",
IsTor: false,
ASN: 12345,
Method: "GET",
RequestTime: 0.001,
},
},
{
name: "asn field with is_tor=1",
line: "both.example.com\t1.2.3.4\t0\tGET\t/\t200\t0\t0.001\t1\t65535",
wantOK: true,
want: LogRecord{
Website: "both.example.com",
ClientPrefix: "1.2.3.0/24",
URI: "/",
Status: "200",
IsTor: true,
ASN: 65535,
Method: "GET",
RequestTime: 0.001,
},
},
{
name: "missing asn field defaults to 0 (backward compat)",
line: "noasn.example.com\t1.2.3.4\t0\tGET\t/\t200\t0\t0.001\t1",
wantOK: true,
want: LogRecord{
Website: "noasn.example.com",
ClientPrefix: "1.2.3.0/24",
URI: "/",
Status: "200",
IsTor: true,
ASN: 0,
Method: "GET",
RequestTime: 0.001,
},
},
{
name: "invalid asn field defaults to 0",
line: "badann.example.com\t1.2.3.4\t0\tGET\t/\t200\t0\t0.001\t0\tnot-a-number",
wantOK: true,
want: LogRecord{
Website: "badann.example.com",
ClientPrefix: "1.2.3.0/24",
URI: "/",
Status: "200",
IsTor: false,
ASN: 0,
Method: "GET",
RequestTime: 0.001,
}, },
}, },
} }
@@ -98,7 +209,7 @@ func TestTruncateIP(t *testing.T) {
{"1.2.3.4", "1.2.3.0/24"}, {"1.2.3.4", "1.2.3.0/24"},
{"192.168.100.200", "192.168.100.0/24"}, {"192.168.100.200", "192.168.100.0/24"},
{"2001:db8:cafe:babe::1", "2001:db8:cafe::/48"}, // /48 = 3 full groups intact {"2001:db8:cafe:babe::1", "2001:db8:cafe::/48"}, // /48 = 3 full groups intact
{"::1", "::/48"}, // loopback — first 48 bits are all zero {"::1", "::/48"}, // loopback — first 48 bits are all zero
} }
for _, tc := range tests { for _, tc := range tests {

209
cmd/collector/prom.go Normal file
View File

@@ -0,0 +1,209 @@
package main
import (
"bufio"
"fmt"
"net/http"
"sort"
"strings"
"sync"
)
// Body-size histogram bucket upper bounds in bytes.
// promNumBodyBounds must equal len(promBodyBounds); it is a named constant so
// the per-host bucket arrays below can be sized at compile time.
const promNumBodyBounds = 7

var promBodyBounds = [promNumBodyBounds]int64{256, 1024, 4096, 16384, 65536, 262144, 1048576}

// Request-time histogram bucket upper bounds in seconds (standard Prometheus defaults).
const promNumTimeBounds = 11

var promTimeBounds = [promNumTimeBounds]float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}

// promCounterCap bounds the number of distinct label sets so arbitrary
// request hosts/methods/statuses cannot grow the counter map without limit.
const promCounterCap = 100_000 // safety cap on {host,method,status} counter entries

// promCounterKey is the label set for per-request counters.
type promCounterKey struct {
	Host   string
	Method string
	Status string
}

// promBodyEntry holds the body_bytes_sent histogram for one host.
// Buckets are cumulative, Prometheus-style: index i counts observations
// <= promBodyBounds[i]; the final index counts everything (le="+Inf").
type promBodyEntry struct {
	buckets [promNumBodyBounds + 1]int64 // indices 0..N-1: le=bound[i]; index N: le=+Inf
	sum     int64
}

// promTimeEntry holds the request_time histogram for one host.
// Same cumulative-bucket layout as promBodyEntry.
type promTimeEntry struct {
	buckets [promNumTimeBounds + 1]int64
	sum     float64
}

// PromStore accumulates Prometheus metrics ingested from log records.
//
// Ingest must be called from exactly one goroutine (the store's Run goroutine).
// ServeHTTP may be called from any number of goroutines concurrently.
type PromStore struct {
	mu       sync.Mutex
	counters map[promCounterKey]int64
	body     map[string]*promBodyEntry // keyed by host
	reqTime  map[string]*promTimeEntry // keyed by host
}
// NewPromStore returns an empty PromStore ready for use.
func NewPromStore() *PromStore {
	s := &PromStore{}
	s.counters = make(map[promCounterKey]int64, 1024)
	s.body = make(map[string]*promBodyEntry, 64)
	s.reqTime = make(map[string]*promTimeEntry, 64)
	return s
}
// Ingest records one log record into the Prometheus metrics: the
// per-{host,method,status} request counter plus the per-host body-size and
// request-time histograms.
// Must be called from a single goroutine.
func (p *PromStore) Ingest(r LogRecord) {
	p.mu.Lock()
	defer p.mu.Unlock()

	// Per-{host,method,status} request counter. New label sets are only
	// admitted while the map is below promCounterCap; existing ones always
	// keep counting.
	key := promCounterKey{Host: r.Website, Method: r.Method, Status: r.Status}
	if _, seen := p.counters[key]; seen || len(p.counters) < promCounterCap {
		p.counters[key]++
	}

	// body_bytes_sent histogram (keyed by host only).
	body := p.body[r.Website]
	if body == nil {
		body = &promBodyEntry{}
		p.body[r.Website] = body
	}
	for i, bound := range promBodyBounds {
		if r.BodyBytesSent <= bound {
			body.buckets[i]++
		}
	}
	body.buckets[promNumBodyBounds]++ // cumulative +Inf bucket
	body.sum += r.BodyBytesSent

	// request_time histogram (keyed by host only).
	rt := p.reqTime[r.Website]
	if rt == nil {
		rt = &promTimeEntry{}
		p.reqTime[r.Website] = rt
	}
	for i, bound := range promTimeBounds {
		if r.RequestTime <= bound {
			rt.buckets[i]++
		}
	}
	rt.buckets[promNumTimeBounds]++ // cumulative +Inf bucket
	rt.sum += r.RequestTime
}
// ServeHTTP renders all metrics in the Prometheus text exposition format (0.0.4).
//
// Safe for concurrent use: all shared state is copied under the mutex first,
// so rendering (which performs I/O) never holds the lock.
func (p *PromStore) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
	// Snapshot everything under the lock, then render without holding it.
	p.mu.Lock()
	type counterSnap struct {
		k promCounterKey
		v int64
	}
	counters := make([]counterSnap, 0, len(p.counters))
	for k, v := range p.counters {
		counters = append(counters, counterSnap{k, v})
	}
	type bodySnap struct {
		host string
		e    promBodyEntry
	}
	bodySnaps := make([]bodySnap, 0, len(p.body))
	for h, e := range p.body {
		// *e copies the histogram value, so later Ingest calls cannot race
		// with the rendering below.
		bodySnaps = append(bodySnaps, bodySnap{h, *e})
	}
	type timeSnap struct {
		host string
		e    promTimeEntry
	}
	timeSnaps := make([]timeSnap, 0, len(p.reqTime))
	for h, e := range p.reqTime {
		timeSnaps = append(timeSnaps, timeSnap{h, *e})
	}
	p.mu.Unlock()
	// Sort for stable, human-readable output.
	sort.Slice(counters, func(i, j int) bool {
		a, b := counters[i].k, counters[j].k
		if a.Host != b.Host {
			return a.Host < b.Host
		}
		if a.Method != b.Method {
			return a.Method < b.Method
		}
		return a.Status < b.Status
	})
	sort.Slice(bodySnaps, func(i, j int) bool { return bodySnaps[i].host < bodySnaps[j].host })
	sort.Slice(timeSnaps, func(i, j int) bool { return timeSnaps[i].host < timeSnaps[j].host })
	w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")
	// Buffered writer: one large flush instead of many tiny writes.
	bw := bufio.NewWriterSize(w, 256*1024)
	// nginx_http_requests_total
	fmt.Fprintln(bw, "# HELP nginx_http_requests_total Total number of HTTP requests processed.")
	fmt.Fprintln(bw, "# TYPE nginx_http_requests_total counter")
	for _, c := range counters {
		fmt.Fprintf(bw, "nginx_http_requests_total{host=%q,method=%q,status=%q} %d\n",
			c.k.Host, c.k.Method, c.k.Status, c.v)
	}
	// nginx_http_response_body_bytes (histogram, labeled by host)
	fmt.Fprintln(bw, "# HELP nginx_http_response_body_bytes HTTP response body size distribution in bytes.")
	fmt.Fprintln(bw, "# TYPE nginx_http_response_body_bytes histogram")
	for _, s := range bodySnaps {
		for i, bound := range promBodyBounds {
			fmt.Fprintf(bw, "nginx_http_response_body_bytes_bucket{host=%q,le=%q} %d\n",
				s.host, fmt.Sprintf("%d", bound), s.e.buckets[i])
		}
		fmt.Fprintf(bw, "nginx_http_response_body_bytes_bucket{host=%q,le=\"+Inf\"} %d\n",
			s.host, s.e.buckets[promNumBodyBounds])
		// A histogram's _count equals its +Inf bucket by definition.
		fmt.Fprintf(bw, "nginx_http_response_body_bytes_count{host=%q} %d\n",
			s.host, s.e.buckets[promNumBodyBounds])
		fmt.Fprintf(bw, "nginx_http_response_body_bytes_sum{host=%q} %d\n",
			s.host, s.e.sum)
	}
	// nginx_http_request_duration_seconds (histogram, labeled by host)
	fmt.Fprintln(bw, "# HELP nginx_http_request_duration_seconds HTTP request processing time in seconds.")
	fmt.Fprintln(bw, "# TYPE nginx_http_request_duration_seconds histogram")
	for _, s := range timeSnaps {
		for i, bound := range promTimeBounds {
			fmt.Fprintf(bw, "nginx_http_request_duration_seconds_bucket{host=%q,le=%q} %d\n",
				s.host, formatFloat(bound), s.e.buckets[i])
		}
		fmt.Fprintf(bw, "nginx_http_request_duration_seconds_bucket{host=%q,le=\"+Inf\"} %d\n",
			s.host, s.e.buckets[promNumTimeBounds])
		fmt.Fprintf(bw, "nginx_http_request_duration_seconds_count{host=%q} %d\n",
			s.host, s.e.buckets[promNumTimeBounds])
		fmt.Fprintf(bw, "nginx_http_request_duration_seconds_sum{host=%q} %g\n",
			s.host, s.e.sum)
	}
	bw.Flush()
}
// formatFloat renders a float64 bucket bound for use in a `le` label.
// %g drops trailing zeros; integral values then get a ".0" suffix so every
// bound reads as a float (e.g. "0.5", "2.5", "10.0" — not "10", which the
// previous doc comment incorrectly claimed). Exponent forms ("1e-07") are
// left untouched since they are already unambiguously floats.
func formatFloat(f float64) string {
	s := fmt.Sprintf("%g", f)
	if strings.ContainsAny(s, ".e") {
		return s
	}
	return s + ".0"
}

130
cmd/collector/prom_test.go Normal file
View File

@@ -0,0 +1,130 @@
package main
import (
"net/http/httptest"
"strings"
"testing"
)
// TestPromStoreIngestBodyBuckets checks cumulative bucket placement for one
// 512-byte body: above the 256 bound, within every bound from 1024 up.
func TestPromStoreIngestBodyBuckets(t *testing.T) {
	ps := NewPromStore()
	ps.Ingest(LogRecord{Website: "example.com", Method: "GET", Status: "200", BodyBytesSent: 512})
	ps.mu.Lock()
	entry := ps.body["example.com"]
	ps.mu.Unlock()
	if entry == nil {
		t.Fatal("expected body entry, got nil")
	}
	if got := entry.buckets[0]; got != 0 { // le=256: 512 > 256
		t.Errorf("le=256 bucket = %d, want 0", got)
	}
	if got := entry.buckets[1]; got != 1 { // le=1024: 512 <= 1024
		t.Errorf("le=1024 bucket = %d, want 1", got)
	}
	for i := 2; i <= promNumBodyBounds; i++ {
		if got := entry.buckets[i]; got != 1 {
			t.Errorf("bucket[%d] = %d, want 1", i, got)
		}
	}
	if entry.sum != 512 {
		t.Errorf("sum = %d, want 512", entry.sum)
	}
}
// TestPromStoreIngestTimeBuckets checks cumulative bucket placement for a
// single 75 ms observation.
func TestPromStoreIngestTimeBuckets(t *testing.T) {
	ps := NewPromStore()
	// 0.075s: > 0.05, ≤ 0.1
	ps.Ingest(LogRecord{Website: "example.com", Method: "GET", Status: "200", RequestTime: 0.075})
	ps.mu.Lock()
	entry := ps.reqTime["example.com"]
	ps.mu.Unlock()
	if entry == nil {
		t.Fatal("expected time entry, got nil")
	}
	// le=0.05 (index 3): 0.075 > 0.05 → 0
	if got := entry.buckets[3]; got != 0 {
		t.Errorf("le=0.05 bucket = %d, want 0", got)
	}
	// le=0.1 (index 4): 0.075 ≤ 0.1 → 1
	if got := entry.buckets[4]; got != 1 {
		t.Errorf("le=0.1 bucket = %d, want 1", got)
	}
	// +Inf (last): always 1
	if got := entry.buckets[promNumTimeBounds]; got != 1 {
		t.Errorf("+Inf bucket = %d, want 1", got)
	}
}
// TestPromStoreCounter verifies that identical label sets accumulate and
// distinct ones get separate entries.
func TestPromStoreCounter(t *testing.T) {
	ps := NewPromStore()
	for _, rec := range []LogRecord{
		{Website: "a.com", Method: "GET", Status: "200"},
		{Website: "a.com", Method: "GET", Status: "200"},
		{Website: "a.com", Method: "POST", Status: "201"},
	} {
		ps.Ingest(rec)
	}
	ps.mu.Lock()
	gets := ps.counters[promCounterKey{"a.com", "GET", "200"}]
	posts := ps.counters[promCounterKey{"a.com", "POST", "201"}]
	ps.mu.Unlock()
	if gets != 2 {
		t.Errorf("GET/200 count = %d, want 2", gets)
	}
	if posts != 1 {
		t.Errorf("POST/201 count = %d, want 1", posts)
	}
}
// TestPromStoreServeHTTP ingests one record and spot-checks the rendered
// Prometheus text exposition for every metric family.
func TestPromStoreServeHTTP(t *testing.T) {
	ps := NewPromStore()
	ps.Ingest(LogRecord{
		Website: "example.com", Method: "GET", Status: "200",
		BodyBytesSent: 100, RequestTime: 0.042,
	})
	rec := httptest.NewRecorder()
	ps.ServeHTTP(rec, httptest.NewRequest("GET", "/metrics", nil))
	out := rec.Body.String()
	for _, want := range []string{
		"# TYPE nginx_http_requests_total counter",
		`nginx_http_requests_total{host="example.com",method="GET",status="200"} 1`,
		"# TYPE nginx_http_response_body_bytes histogram",
		`nginx_http_response_body_bytes_bucket{host="example.com",le="256"} 1`, // 100 ≤ 256
		`nginx_http_response_body_bytes_count{host="example.com"} 1`,
		`nginx_http_response_body_bytes_sum{host="example.com"} 100`,
		"# TYPE nginx_http_request_duration_seconds histogram",
		`nginx_http_request_duration_seconds_bucket{host="example.com",le="0.05"} 1`, // 0.042 ≤ 0.05
		`nginx_http_request_duration_seconds_count{host="example.com"} 1`,
	} {
		if !strings.Contains(out, want) {
			t.Errorf("missing %q in output:\n%s", want, out)
		}
	}
}
func TestPromStoreCounterCap(t *testing.T) {
ps := NewPromStore()
// Fill to cap with distinct {host,method,status} combos
for i := 0; i < promCounterCap+10; i++ {
host := strings.Repeat("x", i%10+1) + ".com"
status := "200"
if i%3 == 0 {
status = "404"
}
ps.Ingest(LogRecord{Website: host, Method: "GET", Status: status})
}
ps.mu.Lock()
n := len(ps.counters)
ps.mu.Unlock()
if n > promCounterCap {
t.Errorf("counter map size %d exceeds cap %d", n, promCounterCap)
}
}

View File

@@ -5,9 +5,11 @@ import (
"log" "log"
"time" "time"
st "git.ipng.ch/ipng/nginx-logtail/internal/store"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb" pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
@@ -62,15 +64,50 @@ func (srv *Server) ListTargets(_ context.Context, _ *pb.ListTargetsRequest) (*pb
}, nil }, nil
} }
// peerAddr returns the remote address gRPC recorded in ctx, or "unknown"
// when no peer information is attached.
func peerAddr(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return "unknown"
	}
	return p.Addr.String()
}
// DumpSnapshots streams every buffered fine snapshot followed by every
// buffered coarse snapshot to the client, oldest first.
func (srv *Server) DumpSnapshots(_ *pb.DumpSnapshotsRequest, stream grpc.ServerStreamingServer[pb.Snapshot]) error {
	fine, coarse := srv.store.DumpRings()
	send := func(snaps []st.Snapshot, isCoarse bool) error {
		for _, snap := range snaps {
			if err := stream.Send(storeSnapshotToProto(snap, srv.source, isCoarse)); err != nil {
				return err
			}
		}
		return nil
	}
	if err := send(fine, false); err != nil {
		return err
	}
	return send(coarse, true)
}
// storeSnapshotToProto converts a store snapshot into its wire representation,
// tagging it with the collector source and ring granularity.
func storeSnapshotToProto(snap st.Snapshot, source string, isCoarse bool) *pb.Snapshot {
	out := &pb.Snapshot{
		Source:    source,
		Timestamp: snap.Timestamp.Unix(),
		IsCoarse:  isCoarse,
	}
	for _, entry := range snap.Entries {
		out.Entries = append(out.Entries, &pb.TopNEntry{Label: entry.Label, Count: entry.Count})
	}
	return out
}
func (srv *Server) StreamSnapshots(req *pb.SnapshotRequest, stream grpc.ServerStreamingServer[pb.Snapshot]) error { func (srv *Server) StreamSnapshots(req *pb.SnapshotRequest, stream grpc.ServerStreamingServer[pb.Snapshot]) error {
ch := srv.store.Subscribe() ch := srv.store.Subscribe()
defer srv.store.Unsubscribe(ch) defer srv.store.Unsubscribe(ch)
log.Printf("server: new StreamSnapshots subscriber from %v", stream.Context().Value("peer")) addr := peerAddr(stream.Context())
log.Printf("server: new StreamSnapshots subscriber from %s", addr)
for { for {
select { select {
case <-stream.Context().Done(): case <-stream.Context().Done():
log.Printf("server: StreamSnapshots subscriber disconnected") log.Printf("server: StreamSnapshots subscriber disconnected: %s", addr)
return nil return nil
case snap, ok := <-ch: case snap, ok := <-ch:
if !ok { if !ok {

View File

@@ -104,10 +104,10 @@ func TestGRPCEndToEnd(t *testing.T) {
// Pre-populate with known data then rotate so it's queryable // Pre-populate with known data then rotate so it's queryable
for i := 0; i < 500; i++ { for i := 0; i < 500; i++ {
store.ingest(LogRecord{"busy.com", "1.2.3.0/24", "/api", "200"}) store.ingest(LogRecord{Website: "busy.com", ClientPrefix: "1.2.3.0/24", URI: "/api", Status: "200"})
} }
for i := 0; i < 200; i++ { for i := 0; i < 200; i++ {
store.ingest(LogRecord{"quiet.com", "5.6.7.0/24", "/", "429"}) store.ingest(LogRecord{Website: "quiet.com", ClientPrefix: "5.6.7.0/24", URI: "/", Status: "429"})
} }
store.rotate(time.Now()) store.rotate(time.Now())
@@ -192,7 +192,7 @@ func TestGRPCEndToEnd(t *testing.T) {
t.Fatalf("StreamSnapshots error: %v", err) t.Fatalf("StreamSnapshots error: %v", err)
} }
store.ingest(LogRecord{"new.com", "9.9.9.0/24", "/new", "200"}) store.ingest(LogRecord{Website: "new.com", ClientPrefix: "9.9.9.0/24", URI: "/new", Status: "200"})
store.rotate(time.Now()) store.rotate(time.Now())
snap, err := stream.Recv() snap, err := stream.Recv()

View File

@@ -4,8 +4,8 @@ import (
"sync" "sync"
"time" "time"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
st "git.ipng.ch/ipng/nginx-logtail/internal/store" st "git.ipng.ch/ipng/nginx-logtail/internal/store"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
) )
const liveMapCap = 100_000 // hard cap on live map entries const liveMapCap = 100_000 // hard cap on live map entries
@@ -13,9 +13,10 @@ const liveMapCap = 100_000 // hard cap on live map entries
// Store holds the live map and both ring buffers. // Store holds the live map and both ring buffers.
type Store struct { type Store struct {
source string source string
prom *PromStore // optional; if non-nil, receives every ingested record
// live map — written only by the Run goroutine; no locking needed on writes // live map — written only by the Run goroutine; no locking needed on writes
live map[st.Tuple4]int64 live map[st.Tuple6]int64
liveLen int liveLen int
// ring buffers — protected by mu for reads // ring buffers — protected by mu for reads
@@ -36,15 +37,18 @@ type Store struct {
func NewStore(source string) *Store { func NewStore(source string) *Store {
return &Store{ return &Store{
source: source, source: source,
live: make(map[st.Tuple4]int64, liveMapCap), live: make(map[st.Tuple6]int64, liveMapCap),
subs: make(map[chan st.Snapshot]struct{}), subs: make(map[chan st.Snapshot]struct{}),
} }
} }
// ingest records one log record into the live map. // ingest records one log record into the live map and the Prometheus store (if set).
// Must only be called from the Run goroutine. // Must only be called from the Run goroutine.
func (s *Store) ingest(r LogRecord) { func (s *Store) ingest(r LogRecord) {
key := st.Tuple4{Website: r.Website, Prefix: r.ClientPrefix, URI: r.URI, Status: r.Status} if s.prom != nil {
s.prom.Ingest(r)
}
key := st.Tuple6{Website: r.Website, Prefix: r.ClientPrefix, URI: r.URI, Status: r.Status, IsTor: r.IsTor, ASN: r.ASN}
if _, exists := s.live[key]; !exists { if _, exists := s.live[key]; !exists {
if s.liveLen >= liveMapCap { if s.liveLen >= liveMapCap {
return return
@@ -77,7 +81,7 @@ func (s *Store) rotate(now time.Time) {
} }
s.mu.Unlock() s.mu.Unlock()
s.live = make(map[st.Tuple4]int64, liveMapCap) s.live = make(map[st.Tuple6]int64, liveMapCap)
s.liveLen = 0 s.liveLen = 0
s.broadcast(fine) s.broadcast(fine)
@@ -150,6 +154,32 @@ func (s *Store) coarseView() st.RingView {
return st.RingView{Ring: ring, Head: s.coarseHead, Size: st.CoarseRingSize} return st.RingView{Ring: ring, Head: s.coarseHead, Size: st.CoarseRingSize}
} }
// DumpRings returns copies of all non-empty fine and coarse ring snapshots in
// chronological order. The lock is held only for the duration of the copy.
//
// NOTE(review): the head/filled counters and ring values are captured under
// RLock, but the per-element reads below happen after RUnlock. That is only
// race-free if fineRing/coarseRing are fixed-size arrays (copied by value on
// assignment), not slices — confirm against the Store field declarations.
func (s *Store) DumpRings() (fine, coarse []st.Snapshot) {
	s.mu.RLock()
	fineRing := s.fineRing
	fineHead := s.fineHead
	fineFilled := s.fineFilled
	coarseRing := s.coarseRing
	coarseHead := s.coarseHead
	coarseFilled := s.coarseFilled
	s.mu.RUnlock()
	// Walk each circular buffer oldest-first: head points one past the most
	// recent entry, so the oldest entry sits at head-filled (mod ring size).
	fine = make([]st.Snapshot, 0, fineFilled)
	for i := 0; i < fineFilled; i++ {
		idx := (fineHead - fineFilled + i + st.FineRingSize) % st.FineRingSize
		fine = append(fine, fineRing[idx])
	}
	coarse = make([]st.Snapshot, 0, coarseFilled)
	for i := 0; i < coarseFilled; i++ {
		idx := (coarseHead - coarseFilled + i + st.CoarseRingSize) % st.CoarseRingSize
		coarse = append(coarse, coarseRing[idx])
	}
	return fine, coarse
}
func (s *Store) Subscribe() chan st.Snapshot { func (s *Store) Subscribe() chan st.Snapshot {
ch := make(chan st.Snapshot, 4) ch := make(chan st.Snapshot, 4)
s.subMu.Lock() s.subMu.Lock()

View File

@@ -5,8 +5,8 @@ import (
"testing" "testing"
"time" "time"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
st "git.ipng.ch/ipng/nginx-logtail/internal/store" st "git.ipng.ch/ipng/nginx-logtail/internal/store"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
) )
func makeStore() *Store { func makeStore() *Store {
@@ -15,7 +15,7 @@ func makeStore() *Store {
func ingestN(s *Store, website, prefix, uri, status string, n int) { func ingestN(s *Store, website, prefix, uri, status string, n int) {
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
s.ingest(LogRecord{website, prefix, uri, status}) s.ingest(LogRecord{Website: website, ClientPrefix: prefix, URI: uri, Status: status})
} }
} }

View File

@@ -28,20 +28,25 @@ type reopenMsg struct {
// fsnotify.Watcher (one inotify instance). This scales to hundreds of files // fsnotify.Watcher (one inotify instance). This scales to hundreds of files
// without hitting the kernel limit on inotify instances per user. // without hitting the kernel limit on inotify instances per user.
type MultiTailer struct { type MultiTailer struct {
paths []string patterns []string
v4bits int scanInterval time.Duration
v6bits int v4bits int
ch chan<- LogRecord v6bits int
ch chan<- LogRecord
} }
func NewMultiTailer(paths []string, v4bits, v6bits int, ch chan<- LogRecord) *MultiTailer { func NewMultiTailer(patterns []string, scanInterval time.Duration, v4bits, v6bits int, ch chan<- LogRecord) *MultiTailer {
return &MultiTailer{paths: paths, v4bits: v4bits, v6bits: v6bits, ch: ch} return &MultiTailer{patterns: patterns, scanInterval: scanInterval, v4bits: v4bits, v6bits: v6bits, ch: ch}
} }
// Run tails all configured files until ctx is cancelled. // Run tails all configured files until ctx is cancelled.
// All files share one fsnotify.Watcher. Log rotation is handled per-file: // All files share one fsnotify.Watcher. Log rotation is handled per-file:
// on RENAME/REMOVE the old fd is drained then a retry goroutine re-opens // on RENAME/REMOVE the old fd is drained then a retry goroutine re-opens
// the original path and hands it back via a channel. // the original path and hands it back via a channel.
//
// A periodic rescan re-expands the glob patterns so that files created after
// startup are picked up automatically and files that have disappeared (and
// are no longer matched by any pattern) are retired.
func (mt *MultiTailer) Run(ctx context.Context) { func (mt *MultiTailer) Run(ctx context.Context) {
watcher, err := fsnotify.NewWatcher() watcher, err := fsnotify.NewWatcher()
if err != nil { if err != nil {
@@ -49,21 +54,33 @@ func (mt *MultiTailer) Run(ctx context.Context) {
} }
defer watcher.Close() defer watcher.Close()
files := make(map[string]*fileState, len(mt.paths)) files := make(map[string]*fileState)
reopenCh := make(chan reopenMsg, len(mt.paths)) retrying := make(map[string]struct{}) // paths currently in a retryOpen goroutine
reopenCh := make(chan reopenMsg, 32)
// Open all files and seek to EOF. startRetry := func(path string) {
for _, path := range mt.paths { if _, already := retrying[path]; already {
return
}
retrying[path] = struct{}{}
go retryOpen(ctx, path, watcher, reopenCh)
}
// Initial scan.
for _, path := range expandGlobs(mt.patterns) {
fs, err := openAndSeekEOF(path, watcher) fs, err := openAndSeekEOF(path, watcher)
if err != nil { if err != nil {
log.Printf("tailer: %s not found, will retry: %v", path, err) log.Printf("tailer: %s not found, will retry: %v", path, err)
go retryOpen(ctx, path, watcher, reopenCh) startRetry(path)
continue continue
} }
files[path] = fs files[path] = fs
log.Printf("tailer: watching %s", path) log.Printf("tailer: watching %s", path)
} }
ticker := time.NewTicker(mt.scanInterval)
defer ticker.Stop()
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -76,12 +93,16 @@ func (mt *MultiTailer) Run(ctx context.Context) {
if !ok { if !ok {
return return
} }
delete(retrying, msg.path)
files[msg.path] = &fileState{f: msg.f, reader: bufio.NewReader(msg.f)} files[msg.path] = &fileState{f: msg.f, reader: bufio.NewReader(msg.f)}
if err := watcher.Add(msg.path); err != nil { if err := watcher.Add(msg.path); err != nil {
log.Printf("tailer: watcher re-add failed for %s: %v", msg.path, err) log.Printf("tailer: watcher re-add failed for %s: %v", msg.path, err)
} }
log.Printf("tailer: re-opened %s after rotation", msg.path) log.Printf("tailer: re-opened %s after rotation", msg.path)
case <-ticker.C:
mt.rescan(ctx, watcher, files, retrying, reopenCh, startRetry)
case event, ok := <-watcher.Events: case event, ok := <-watcher.Events:
if !ok { if !ok {
return return
@@ -99,7 +120,7 @@ func (mt *MultiTailer) Run(ctx context.Context) {
fs.f.Close() fs.f.Close()
delete(files, event.Name) delete(files, event.Name)
_ = watcher.Remove(event.Name) _ = watcher.Remove(event.Name)
go retryOpen(ctx, event.Name, watcher, reopenCh) startRetry(event.Name)
} }
case err, ok := <-watcher.Errors: case err, ok := <-watcher.Errors:
@@ -111,6 +132,49 @@ func (mt *MultiTailer) Run(ctx context.Context) {
} }
} }
// rescan re-expands the glob patterns and reconciles against the current file
// set: new matches are opened (or queued for retry), and files no longer
// matched by any pattern are drained, closed, and retired.
//
// NOTE(review): both ctx and the reopenMsg channel parameter are unused in
// this body (retries are delegated to startRetry, which captures them in the
// caller) — confirm whether the signature can be slimmed down.
func (mt *MultiTailer) rescan(
	ctx context.Context,
	watcher *fsnotify.Watcher,
	files map[string]*fileState,
	retrying map[string]struct{},
	_ chan reopenMsg,
	startRetry func(string),
) {
	// Phase 1: open anything newly matched by the patterns. Paths already
	// open or already mid-retry are left alone.
	current := make(map[string]struct{})
	for _, path := range expandGlobs(mt.patterns) {
		current[path] = struct{}{}
		if _, inFiles := files[path]; inFiles {
			continue
		}
		if _, isRetrying := retrying[path]; isRetrying {
			continue
		}
		// Newly matched file — try to open it right away.
		fs, err := openAndSeekEOF(path, watcher)
		if err != nil {
			startRetry(path)
			continue
		}
		files[path] = fs
		log.Printf("tailer: discovered %s", path)
	}
	// Phase 2: retire files that no longer match any pattern and are not being
	// rotated (rotation is handled by the RENAME/REMOVE event path, not here).
	for path, fs := range files {
		if _, matched := current[path]; !matched {
			// Drain any unread tail before closing so no records are lost.
			mt.readLines(fs.reader)
			fs.f.Close()
			_ = watcher.Remove(path)
			delete(files, path)
			log.Printf("tailer: retired %s (no longer matched by any pattern)", path)
		}
	}
}
// openAndSeekEOF opens path, seeks to EOF, and registers it with watcher. // openAndSeekEOF opens path, seeks to EOF, and registers it with watcher.
func openAndSeekEOF(path string, watcher *fsnotify.Watcher) (*fileState, error) { func openAndSeekEOF(path string, watcher *fsnotify.Watcher) (*fileState, error) {
f, err := os.Open(path) f, err := os.Open(path)

View File

@@ -28,7 +28,7 @@ func TestMultiTailerReadsLines(t *testing.T) {
defer f.Close() defer f.Close()
ch := make(chan LogRecord, 100) ch := make(chan LogRecord, 100)
mt := NewMultiTailer([]string{path}, 24, 48, ch) mt := NewMultiTailer([]string{path}, time.Hour, 24, 48, ch)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@@ -67,7 +67,7 @@ func TestMultiTailerMultipleFiles(t *testing.T) {
} }
ch := make(chan LogRecord, 200) ch := make(chan LogRecord, 200)
mt := NewMultiTailer(paths, 24, 48, ch) mt := NewMultiTailer(paths, time.Hour, 24, 48, ch)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
go mt.Run(ctx) go mt.Run(ctx)
@@ -95,7 +95,7 @@ func TestMultiTailerLogRotation(t *testing.T) {
} }
ch := make(chan LogRecord, 100) ch := make(chan LogRecord, 100)
mt := NewMultiTailer([]string{path}, 24, 48, ch) mt := NewMultiTailer([]string{path}, time.Hour, 24, 48, ch)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
go mt.Run(ctx) go mt.Run(ctx)

View File

@@ -19,9 +19,11 @@ var andRe = regexp.MustCompile(`(?i)\s+and\s+`)
// //
// status=200 status!=200 status>=400 status>400 status<=500 status<500 // status=200 status!=200 status>=400 status>400 status<=500 status<500
// website=example.com — exact match // website=example.com — exact match
// website~=gouda.* — RE2 regex // website~=gouda.* — RE2 regex match
// website!~=gouda.* — RE2 regex exclusion
// uri=/api/v1/ — exact match // uri=/api/v1/ — exact match
// uri~=^/api/.* — RE2 regex // uri~=^/api/.* — RE2 regex match
// uri!~=^/ct/.* — RE2 regex exclusion
// prefix=1.2.3.0/24 — exact match // prefix=1.2.3.0/24 — exact match
// //
// Values may be enclosed in double or single quotes. // Values may be enclosed in double or single quotes.
@@ -57,6 +59,8 @@ func applyTerm(term string, fs *filterState) error {
var op, value string var op, value string
switch { switch {
case strings.HasPrefix(rest, "!~="):
op, value = "!~=", rest[3:]
case strings.HasPrefix(rest, "~="): case strings.HasPrefix(rest, "~="):
op, value = "~=", rest[2:] op, value = "~=", rest[2:]
case strings.HasPrefix(rest, "!="): case strings.HasPrefix(rest, "!="):
@@ -96,8 +100,10 @@ func applyTerm(term string, fs *filterState) error {
fs.Website = value fs.Website = value
case "~=": case "~=":
fs.WebsiteRe = value fs.WebsiteRe = value
case "!~=":
fs.WebsiteReNeg = value
default: default:
return fmt.Errorf("website only supports = and ~=, not %q", op) return fmt.Errorf("website only supports =, ~=, and !~=, not %q", op)
} }
case "uri": case "uri":
switch op { switch op {
@@ -105,16 +111,43 @@ func applyTerm(term string, fs *filterState) error {
fs.URI = value fs.URI = value
case "~=": case "~=":
fs.URIRe = value fs.URIRe = value
case "!~=":
fs.URIReNeg = value
default: default:
return fmt.Errorf("uri only supports = and ~=, not %q", op) return fmt.Errorf("uri only supports =, ~=, and !~=, not %q", op)
} }
case "prefix": case "prefix":
if op != "=" { if op != "=" {
return fmt.Errorf("prefix only supports =, not %q", op) return fmt.Errorf("prefix only supports =, not %q", op)
} }
fs.Prefix = value fs.Prefix = value
case "is_tor":
if op != "=" && op != "!=" {
return fmt.Errorf("is_tor only supports = and !=, not %q", op)
}
if value != "0" && value != "1" {
return fmt.Errorf("is_tor value must be 0 or 1, not %q", value)
}
// Normalise: is_tor=1 and is_tor!=0 both mean "TOR only"
if (op == "=" && value == "1") || (op == "!=" && value == "0") {
fs.IsTor = "1"
} else {
fs.IsTor = "0"
}
case "asn":
if op == "~=" {
return fmt.Errorf("asn does not support ~=; use =, !=, >=, >, <=, <")
}
expr := op + value
if op == "=" {
expr = value
}
if _, _, ok := st.ParseStatusExpr(expr); !ok {
return fmt.Errorf("invalid asn expression %q", expr)
}
fs.ASN = expr
default: default:
return fmt.Errorf("unknown field %q; valid: status, website, uri, prefix", field) return fmt.Errorf("unknown field %q; valid: status, website, uri, prefix, is_tor, asn", field)
} }
return nil return nil
} }
@@ -139,6 +172,9 @@ func FilterExprString(f filterState) string {
if f.WebsiteRe != "" { if f.WebsiteRe != "" {
parts = append(parts, "website~="+quoteMaybe(f.WebsiteRe)) parts = append(parts, "website~="+quoteMaybe(f.WebsiteRe))
} }
if f.WebsiteReNeg != "" {
parts = append(parts, "website!~="+quoteMaybe(f.WebsiteReNeg))
}
if f.Prefix != "" { if f.Prefix != "" {
parts = append(parts, "prefix="+quoteMaybe(f.Prefix)) parts = append(parts, "prefix="+quoteMaybe(f.Prefix))
} }
@@ -148,12 +184,33 @@ func FilterExprString(f filterState) string {
if f.URIRe != "" { if f.URIRe != "" {
parts = append(parts, "uri~="+quoteMaybe(f.URIRe)) parts = append(parts, "uri~="+quoteMaybe(f.URIRe))
} }
if f.URIReNeg != "" {
parts = append(parts, "uri!~="+quoteMaybe(f.URIReNeg))
}
if f.Status != "" { if f.Status != "" {
parts = append(parts, statusTermStr(f.Status)) parts = append(parts, statusTermStr(f.Status))
} }
if f.IsTor != "" {
parts = append(parts, "is_tor="+f.IsTor)
}
if f.ASN != "" {
parts = append(parts, asnTermStr(f.ASN))
}
return strings.Join(parts, " AND ") return strings.Join(parts, " AND ")
} }
// asnTermStr converts a stored ASN expression (">=1000", "12345") to a
// full filter term ("asn>=1000", "asn=12345"). An expression that already
// starts with a comparison operator ('!', '>', '<') is concatenated as-is;
// a bare number gets an explicit "=".
func asnTermStr(expr string) string {
	if expr == "" {
		return ""
	}
	// expr is non-empty here, so expr[0] is safe; the previous additional
	// len(expr) > 0 guard was redundant.
	switch expr[0] {
	case '!', '>', '<':
		return "asn" + expr
	}
	return "asn=" + expr
}
// statusTermStr converts a stored status expression (">=400", "200") to a // statusTermStr converts a stored status expression (">=400", "200") to a
// full filter term ("status>=400", "status=200"). // full filter term ("status>=400", "status=200").
func statusTermStr(expr string) string { func statusTermStr(expr string) string {

View File

@@ -258,3 +258,77 @@ func TestFilterExprRoundTrip(t *testing.T) {
} }
} }
} }
// TestParseAsnEQ: a bare equality term is stored without an operator prefix.
func TestParseAsnEQ(t *testing.T) {
	fs, err := ParseFilterExpr("asn=12345")
	if err != nil || fs.ASN != "12345" {
		t.Fatalf("got err=%v fs=%+v", err, fs)
	}
}

// TestParseAsnNE: inequality keeps its "!=" prefix in the stored expression.
func TestParseAsnNE(t *testing.T) {
	fs, err := ParseFilterExpr("asn!=65000")
	if err != nil || fs.ASN != "!=65000" {
		t.Fatalf("got err=%v fs=%+v", err, fs)
	}
}

// TestParseAsnGE: ">=" comparisons are preserved verbatim.
func TestParseAsnGE(t *testing.T) {
	fs, err := ParseFilterExpr("asn>=1000")
	if err != nil || fs.ASN != ">=1000" {
		t.Fatalf("got err=%v fs=%+v", err, fs)
	}
}

// TestParseAsnLT: "<" comparisons are preserved verbatim.
func TestParseAsnLT(t *testing.T) {
	fs, err := ParseFilterExpr("asn<64512")
	if err != nil || fs.ASN != "<64512" {
		t.Fatalf("got err=%v fs=%+v", err, fs)
	}
}

// TestParseAsnRegexRejected: asn is numeric-only, so "~=" must be refused.
func TestParseAsnRegexRejected(t *testing.T) {
	_, err := ParseFilterExpr("asn~=123")
	if err == nil {
		t.Fatal("expected error for asn~=")
	}
}

// TestParseAsnInvalidExpr: non-numeric values must be rejected at parse time.
func TestParseAsnInvalidExpr(t *testing.T) {
	_, err := ParseFilterExpr("asn=notanumber")
	if err == nil {
		t.Fatal("expected error for non-numeric ASN")
	}
}
// TestFilterExprStringASN: rendering adds the "asn" field name, inserting "="
// only for bare numeric expressions.
func TestFilterExprStringASN(t *testing.T) {
	s := FilterExprString(filterState{ASN: "12345"})
	if s != "asn=12345" {
		t.Fatalf("got %q", s)
	}
	s = FilterExprString(filterState{ASN: ">=1000"})
	if s != "asn>=1000" {
		t.Fatalf("got %q", s)
	}
}

// TestFilterExprRoundTripASN: FilterExprString → ParseFilterExpr must be the
// identity for every supported ASN operator, alone and combined with status.
func TestFilterExprRoundTripASN(t *testing.T) {
	cases := []filterState{
		{ASN: "12345"},
		{ASN: "!=65000"},
		{ASN: ">=1000"},
		{ASN: "<64512"},
		{Status: ">=400", ASN: "12345"},
	}
	for _, fs := range cases {
		expr := FilterExprString(fs)
		fs2, err := ParseFilterExpr(expr)
		if err != nil {
			t.Errorf("round-trip parse error for %+v → %q: %v", fs, expr, err)
			continue
		}
		if fs2 != fs {
			t.Errorf("round-trip mismatch: %+v → %q → %+v", fs, expr, fs2)
		}
	}
}

View File

@@ -220,8 +220,17 @@ func TestDrillURL(t *testing.T) {
if !strings.Contains(u, "f_status=429") { if !strings.Contains(u, "f_status=429") {
t.Errorf("drill from status: missing f_status in %q", u) t.Errorf("drill from status: missing f_status in %q", u)
} }
if !strings.Contains(u, "by=asn") {
t.Errorf("drill from status: expected next by=asn in %q", u)
}
p.GroupByS = "asn"
u = p.drillURL("12345")
if !strings.Contains(u, "f_asn=12345") {
t.Errorf("drill from asn: missing f_asn in %q", u)
}
if !strings.Contains(u, "by=website") { if !strings.Contains(u, "by=website") {
t.Errorf("drill from status: expected cycle back to by=website in %q", u) t.Errorf("drill from asn: expected cycle back to by=website in %q", u)
} }
} }

View File

@@ -13,6 +13,7 @@ import (
st "git.ipng.ch/ipng/nginx-logtail/internal/store" st "git.ipng.ch/ipng/nginx-logtail/internal/store"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb" pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
"google.golang.org/grpc"
) )
// Handler is the HTTP handler for the frontend. // Handler is the HTTP handler for the frontend.
@@ -47,12 +48,16 @@ type TableRow struct {
// filterState holds the filter fields parsed from URL params. // filterState holds the filter fields parsed from URL params.
type filterState struct { type filterState struct {
Website string Website string
Prefix string Prefix string
URI string URI string
Status string // expression: "200", "!=200", ">=400", etc. Status string // expression: "200", "!=200", ">=400", etc.
WebsiteRe string // RE2 regex against website WebsiteRe string // RE2 regex against website
URIRe string // RE2 regex against request URI URIRe string // RE2 regex against request URI
WebsiteReNeg string // RE2 regex exclusion against website
URIReNeg string // RE2 regex exclusion against request URI
IsTor string // "", "1" (TOR only), "0" (non-TOR only)
ASN string // expression: "12345", "!=65000", ">=1000", etc.
} }
// QueryParams holds all parsed URL parameters for one page request. // QueryParams holds all parsed URL parameters for one page request.
@@ -77,6 +82,7 @@ type PageData struct {
Windows []Tab Windows []Tab
GroupBys []Tab GroupBys []Tab
Targets []Tab // source/target picker; empty when only one target available Targets []Tab // source/target picker; empty when only one target available
TorTabs []Tab // all / tor / no-tor toggle
RefreshSecs int RefreshSecs int
Error string Error string
FilterExpr string // current filter serialised to mini-language for the input box FilterExpr string // current filter serialised to mini-language for the input box
@@ -89,7 +95,7 @@ var windowSpecs = []struct{ s, label string }{
} }
var groupBySpecs = []struct{ s, label string }{ var groupBySpecs = []struct{ s, label string }{
{"website", "website"}, {"prefix", "prefix"}, {"uri", "uri"}, {"status", "status"}, {"website", "website"}, {"asn", "asn"}, {"prefix", "prefix"}, {"status", "status"}, {"uri", "uri"},
} }
func parseWindowString(s string) (pb.Window, string) { func parseWindowString(s string) (pb.Window, string) {
@@ -119,6 +125,8 @@ func parseGroupByString(s string) (pb.GroupBy, string) {
return pb.GroupBy_REQUEST_URI, "uri" return pb.GroupBy_REQUEST_URI, "uri"
case "status": case "status":
return pb.GroupBy_HTTP_RESPONSE, "status" return pb.GroupBy_HTTP_RESPONSE, "status"
case "asn":
return pb.GroupBy_ASN_NUMBER, "asn"
default: default:
return pb.GroupBy_WEBSITE, "website" return pb.GroupBy_WEBSITE, "website"
} }
@@ -150,18 +158,22 @@ func (h *Handler) parseParams(r *http.Request) QueryParams {
GroupByS: grpS, GroupByS: grpS,
N: n, N: n,
Filter: filterState{ Filter: filterState{
Website: q.Get("f_website"), Website: q.Get("f_website"),
Prefix: q.Get("f_prefix"), Prefix: q.Get("f_prefix"),
URI: q.Get("f_uri"), URI: q.Get("f_uri"),
Status: q.Get("f_status"), Status: q.Get("f_status"),
WebsiteRe: q.Get("f_website_re"), WebsiteRe: q.Get("f_website_re"),
URIRe: q.Get("f_uri_re"), URIRe: q.Get("f_uri_re"),
WebsiteReNeg: q.Get("f_website_re_neg"),
URIReNeg: q.Get("f_uri_re_neg"),
IsTor: q.Get("f_is_tor"),
ASN: q.Get("f_asn"),
}, },
} }
} }
func buildFilter(f filterState) *pb.Filter { func buildFilter(f filterState) *pb.Filter {
if f.Website == "" && f.Prefix == "" && f.URI == "" && f.Status == "" && f.WebsiteRe == "" && f.URIRe == "" { if f.Website == "" && f.Prefix == "" && f.URI == "" && f.Status == "" && f.WebsiteRe == "" && f.URIRe == "" && f.WebsiteReNeg == "" && f.URIReNeg == "" && f.IsTor == "" && f.ASN == "" {
return nil return nil
} }
out := &pb.Filter{} out := &pb.Filter{}
@@ -186,6 +198,24 @@ func buildFilter(f filterState) *pb.Filter {
if f.URIRe != "" { if f.URIRe != "" {
out.UriRegex = &f.URIRe out.UriRegex = &f.URIRe
} }
if f.WebsiteReNeg != "" {
out.WebsiteRegexExclude = &f.WebsiteReNeg
}
if f.URIReNeg != "" {
out.UriRegexExclude = &f.URIReNeg
}
switch f.IsTor {
case "1":
out.Tor = pb.TorFilter_TOR_YES
case "0":
out.Tor = pb.TorFilter_TOR_NO
}
if f.ASN != "" {
if n, op, ok := st.ParseStatusExpr(f.ASN); ok {
out.AsnNumber = &n
out.AsnOp = op
}
}
return out return out
} }
@@ -214,6 +244,18 @@ func (p QueryParams) toValues() url.Values {
if p.Filter.URIRe != "" { if p.Filter.URIRe != "" {
v.Set("f_uri_re", p.Filter.URIRe) v.Set("f_uri_re", p.Filter.URIRe)
} }
if p.Filter.WebsiteReNeg != "" {
v.Set("f_website_re_neg", p.Filter.WebsiteReNeg)
}
if p.Filter.URIReNeg != "" {
v.Set("f_uri_re_neg", p.Filter.URIReNeg)
}
if p.Filter.IsTor != "" {
v.Set("f_is_tor", p.Filter.IsTor)
}
if p.Filter.ASN != "" {
v.Set("f_asn", p.Filter.ASN)
}
return v return v
} }
@@ -235,7 +277,8 @@ func (p QueryParams) buildURL(overrides map[string]string) string {
func (p QueryParams) clearFilterURL() string { func (p QueryParams) clearFilterURL() string {
return p.buildURL(map[string]string{ return p.buildURL(map[string]string{
"f_website": "", "f_prefix": "", "f_uri": "", "f_status": "", "f_website": "", "f_prefix": "", "f_uri": "", "f_status": "",
"f_website_re": "", "f_uri_re": "", "f_website_re": "", "f_uri_re": "", "f_website_re_neg": "", "f_uri_re_neg": "",
"f_is_tor": "", "f_asn": "",
}) })
} }
@@ -248,7 +291,9 @@ func nextGroupBy(s string) string {
return "uri" return "uri"
case "uri": case "uri":
return "status" return "status"
default: // status → back to website case "status":
return "asn"
default: // asn → back to website
return "website" return "website"
} }
} }
@@ -264,6 +309,8 @@ func groupByFilterKey(s string) string {
return "f_uri" return "f_uri"
case "status": case "status":
return "f_status" return "f_status"
case "asn":
return "f_asn"
default: default:
return "f_website" return "f_website"
} }
@@ -314,6 +361,36 @@ func buildCrumbs(p QueryParams) []Crumb {
RemoveURL: p.buildURL(map[string]string{"f_uri_re": ""}), RemoveURL: p.buildURL(map[string]string{"f_uri_re": ""}),
}) })
} }
if p.Filter.WebsiteReNeg != "" {
crumbs = append(crumbs, Crumb{
Text: "website!~=" + p.Filter.WebsiteReNeg,
RemoveURL: p.buildURL(map[string]string{"f_website_re_neg": ""}),
})
}
if p.Filter.URIReNeg != "" {
crumbs = append(crumbs, Crumb{
Text: "uri!~=" + p.Filter.URIReNeg,
RemoveURL: p.buildURL(map[string]string{"f_uri_re_neg": ""}),
})
}
switch p.Filter.IsTor {
case "1":
crumbs = append(crumbs, Crumb{
Text: "is_tor=1 (TOR only)",
RemoveURL: p.buildURL(map[string]string{"f_is_tor": ""}),
})
case "0":
crumbs = append(crumbs, Crumb{
Text: "is_tor=0 (no TOR)",
RemoveURL: p.buildURL(map[string]string{"f_is_tor": ""}),
})
}
if p.Filter.ASN != "" {
crumbs = append(crumbs, Crumb{
Text: asnTermStr(p.Filter.ASN),
RemoveURL: p.buildURL(map[string]string{"f_asn": ""}),
})
}
return crumbs return crumbs
} }
@@ -341,6 +418,23 @@ func buildGroupByTabs(p QueryParams) []Tab {
return tabs return tabs
} }
// buildTorTabs builds the all / tor / no-tor toggle tabs. Each tab links to
// the current page URL with f_is_tor overridden; the tab whose value matches
// the active filter is marked Active.
func buildTorTabs(p QueryParams) []Tab {
	type option struct{ val, label string }
	opts := []option{
		{"", "all"},
		{"1", "tor"},
		{"0", "no tor"},
	}
	var tabs []Tab
	for _, o := range opts {
		tabs = append(tabs, Tab{
			Label:  o.label,
			URL:    p.buildURL(map[string]string{"f_is_tor": o.val}),
			Active: p.Filter.IsTor == o.val,
		})
	}
	return tabs
}
// buildTargetTabs builds the source/target picker tabs from a ListTargets response. // buildTargetTabs builds the source/target picker tabs from a ListTargets response.
// Returns nil (hide picker) when only one endpoint is reachable. // Returns nil (hide picker) when only one endpoint is reachable.
func (h *Handler) buildTargetTabs(p QueryParams, lt *pb.ListTargetsResponse) []Tab { func (h *Handler) buildTargetTabs(p QueryParams, lt *pb.ListTargetsResponse) []Tab {
@@ -462,7 +556,21 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
trendCh <- trendResult{resp, err} trendCh <- trendResult{resp, err}
}() }()
go func() { go func() {
resp, err := client.ListTargets(ctx, &pb.ListTargetsRequest{}) // Always query the default target for ListTargets so we get the full
// list of available sources even when viewing a specific collector.
ltClient := client
var ltConn *grpc.ClientConn
if params.Target != h.defaultTarget {
c, cl, err := dial(h.defaultTarget)
if err == nil {
ltConn = c
ltClient = cl
}
}
resp, err := ltClient.ListTargets(ctx, &pb.ListTargetsRequest{})
if ltConn != nil {
ltConn.Close()
}
if err != nil { if err != nil {
ltCh <- nil ltCh <- nil
} else { } else {
@@ -502,6 +610,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Breadcrumbs: buildCrumbs(params), Breadcrumbs: buildCrumbs(params),
Windows: buildWindowTabs(params), Windows: buildWindowTabs(params),
GroupBys: buildGroupByTabs(params), GroupBys: buildGroupByTabs(params),
TorTabs: buildTorTabs(params),
Targets: h.buildTargetTabs(params, lt), Targets: h.buildTargetTabs(params, lt),
RefreshSecs: h.refreshSecs, RefreshSecs: h.refreshSecs,
FilterExpr: filterExprInput, FilterExpr: filterExprInput,
@@ -524,6 +633,7 @@ func (h *Handler) errorPage(params QueryParams, msg string) PageData {
Params: params, Params: params,
Windows: buildWindowTabs(params), Windows: buildWindowTabs(params),
GroupBys: buildGroupByTabs(params), GroupBys: buildGroupByTabs(params),
TorTabs: buildTorTabs(params),
Breadcrumbs: buildCrumbs(params), Breadcrumbs: buildCrumbs(params),
RefreshSecs: h.refreshSecs, RefreshSecs: h.refreshSecs,
Error: msg, Error: msg,

View File

@@ -9,6 +9,7 @@ import (
"net/http" "net/http"
"os" "os"
"os/signal" "os/signal"
"strconv"
"syscall" "syscall"
) )
@@ -16,10 +17,10 @@ import (
var templatesFS embed.FS var templatesFS embed.FS
func main() { func main() {
listen := flag.String("listen", ":8080", "HTTP listen address") listen := flag.String("listen", envOr("FRONTEND_LISTEN", ":8080"), "HTTP listen address (env: FRONTEND_LISTEN)")
target := flag.String("target", "localhost:9091", "default gRPC endpoint (aggregator or collector)") target := flag.String("target", envOr("FRONTEND_TARGET", "localhost:9091"), "default gRPC endpoint, aggregator or collector (env: FRONTEND_TARGET)")
n := flag.Int("n", 25, "default number of table rows") n := flag.Int("n", envOrInt("FRONTEND_N", 25), "default number of table rows (env: FRONTEND_N)")
refresh := flag.Int("refresh", 30, "meta-refresh interval in seconds (0 = disabled)") refresh := flag.Int("refresh", envOrInt("FRONTEND_REFRESH", 30), "meta-refresh interval in seconds, 0 to disable (env: FRONTEND_REFRESH)")
flag.Parse() flag.Parse()
funcMap := template.FuncMap{"fmtCount": fmtCount} funcMap := template.FuncMap{"fmtCount": fmtCount}
@@ -51,3 +52,20 @@ func main() {
log.Printf("frontend: shutting down") log.Printf("frontend: shutting down")
srv.Shutdown(context.Background()) srv.Shutdown(context.Background())
} }
// envOr returns the value of the environment variable key, falling back to
// def when the variable is unset or empty.
func envOr(key, def string) string {
	v := os.Getenv(key)
	if v == "" {
		return def
	}
	return v
}
// envOrInt returns the integer value of the environment variable key, falling
// back to def when the variable is unset or empty. A set-but-unparseable
// value is logged and also falls back to def.
func envOrInt(key string, def int) int {
	v := os.Getenv(key)
	if v == "" {
		return def
	}
	n, err := strconv.Atoi(v)
	if err != nil {
		log.Printf("frontend: invalid int for %s=%q, using default %d", key, v, def)
		return def
	}
	return n
}

View File

@@ -35,6 +35,7 @@ a:hover { text-decoration: underline; }
.nodata { color: #999; margin: 2em 0; font-style: italic; } .nodata { color: #999; margin: 2em 0; font-style: italic; }
footer { margin-top: 2em; padding-top: 0.6em; border-top: 1px solid #e0e0e0; font-size: 0.8em; color: #999; } footer { margin-top: 2em; padding-top: 0.6em; border-top: 1px solid #e0e0e0; font-size: 0.8em; color: #999; }
.tabs-targets { margin-top: -0.4em; } .tabs-targets { margin-top: -0.4em; }
.tabs-tor { margin-top: -0.4em; }
.tabs-label { font-size: 0.85em; color: #888; margin-right: 0.2em; align-self: center; } .tabs-label { font-size: 0.85em; color: #888; margin-right: 0.2em; align-self: center; }
.filter-form { display: flex; gap: 0.4em; align-items: center; margin-bottom: 0.7em; } .filter-form { display: flex; gap: 0.4em; align-items: center; margin-bottom: 0.7em; }
.filter-input { flex: 1; font-family: monospace; font-size: 13px; padding: 0.25em 0.5em; border: 1px solid #aaa; } .filter-input { flex: 1; font-family: monospace; font-size: 13px; padding: 0.25em 0.5em; border: 1px solid #aaa; }

View File

@@ -20,12 +20,19 @@
{{- end}} {{- end}}
</div>{{end}} </div>{{end}}
<div class="tabs tabs-tor">
<span class="tabs-label">tor:</span>
{{- range .TorTabs}}
<a href="{{.URL}}"{{if .Active}} class="active"{{end}}>{{.Label}}</a>
{{- end}}
</div>
<form class="filter-form" method="get" action="/"> <form class="filter-form" method="get" action="/">
<input type="hidden" name="target" value="{{.Params.Target}}"> <input type="hidden" name="target" value="{{.Params.Target}}">
<input type="hidden" name="w" value="{{.Params.WindowS}}"> <input type="hidden" name="w" value="{{.Params.WindowS}}">
<input type="hidden" name="by" value="{{.Params.GroupByS}}"> <input type="hidden" name="by" value="{{.Params.GroupByS}}">
<input type="hidden" name="n" value="{{.Params.N}}"> <input type="hidden" name="n" value="{{.Params.N}}">
<input class="filter-input" type="text" name="q" value="{{.FilterExpr}}" placeholder="status>=400 AND website~=gouda.* AND uri~=^/api/"> <input class="filter-input" type="text" name="q" value="{{.FilterExpr}}" placeholder="status>=400 AND website!~=^gouda.* AND uri~=^/ct/v1/ AND is_tor=0 AND asn=8298">
<button type="submit">filter</button> <button type="submit">filter</button>
{{- if .FilterExpr}} <a class="clear" href="{{.ClearFilterURL}}">× clear</a>{{end}} {{- if .FilterExpr}} <a class="clear" href="{{.ClearFilterURL}}">× clear</a>{{end}}
</form> </form>

1
dashboards/README.md Normal file
View File

@@ -0,0 +1 @@
This nginx-logtail dashboard is just to get you started. It was autogenerated by Claude.

File diff suppressed because it is too large Load Diff

26
docker-compose.yml Normal file
View File

@@ -0,0 +1,26 @@
services:
aggregator:
build: .
image: git.ipng.ch/ipng/nginx-logtail
command: ["/usr/local/bin/aggregator"]
restart: unless-stopped
environment:
AGGREGATOR_LISTEN: ":9091"
AGGREGATOR_COLLECTORS: "" # e.g. "collector1:9090,collector2:9090"
AGGREGATOR_SOURCE: "" # defaults to container hostname
ports:
- "9091:9091"
frontend:
image: git.ipng.ch/ipng/nginx-logtail
command: ["/usr/local/bin/frontend"]
restart: unless-stopped
environment:
FRONTEND_LISTEN: ":8080"
FRONTEND_TARGET: "aggregator:9091"
FRONTEND_N: "25"
FRONTEND_REFRESH: "30"
ports:
- "8080:8080"
depends_on:
- aggregator

528
docs/DETAILS.md Normal file
View File

@@ -0,0 +1,528 @@
PREAMBLE
Although this computer program has a permissive license (Apache-2.0), if you came here looking to ask
questions, you're better off just moving on :) This program is shared AS-IS and really without any
intent for anybody but IPng Networks to use it. Also, in case the structure of the repo and the
style of this README wasn't already clear, this program is 100% written and maintained by Claude
Code.
You have been warned :)
SPECIFICATION
This project contains four programs:
1) A **collector** that tails any number of nginx log files and maintains an in-memory structure of
`{website, client_prefix, http_request_uri, http_response, is_tor, asn}` counts across all files.
It answers TopN and Trend queries via gRPC and pushes minute snapshots to the aggregator via
server-streaming. It also exposes a Prometheus `/metrics` endpoint (default `:9100`) with per-host
request counters and response-body/request-time histograms.
Runs on each nginx machine in the cluster. No UI — gRPC and HTTP interfaces only.
2) An **aggregator** that subscribes to the snapshot stream from all collectors, merges their data
into a unified in-memory cache, and exposes the same gRPC interface. Answers questions like "what
is the busiest website globally", "which client prefix is causing the most HTTP 503s", and shows
trending information useful for DDoS detection. Runs on a central machine.
3) An **HTTP frontend** companion to the aggregator that renders a drilldown dashboard. Operators
can restrict by `http_response=429`, then by `website=www.example.com`, and so on. Works with
either a collector or aggregator as its backend. Zero JavaScript — server-rendered HTML with inline
SVG sparklines and meta-refresh.
4) A **CLI** for shell-based debugging. Sends `topn`, `trend`, and `stream` queries to any
collector or aggregator, fans out to multiple targets in parallel, and outputs human-readable
tables or newline-delimited JSON.
Programs are written in Go. No CGO, no external runtime dependencies.
---
![nginx-logtail frontend](frontend.png)
---
DEPLOYMENT
## Docker
All four binaries are published in a single image: `git.ipng.ch/ipng/nginx-logtail`.
The image is built with a two-stage Dockerfile: a `golang:1.24-alpine` builder produces
statically-linked, stripped binaries (`CGO_ENABLED=0`, `-trimpath -ldflags="-s -w"`); the final
stage is `scratch` — no OS, no shell, no runtime dependencies. Each binary is invoked explicitly
via the container `command`.
### Build and push
```
docker compose build --push
```
### Running aggregator + frontend
The `docker-compose.yml` in the repo root runs the aggregator and frontend together. At minimum,
set `AGGREGATOR_COLLECTORS` to the comma-separated `host:port` list of your collector(s):
```sh
AGGREGATOR_COLLECTORS=nginx1:9090,nginx2:9090 docker compose up -d
```
The frontend reaches the aggregator at `aggregator:9091` via Docker's internal DNS. The frontend
UI is available on port `8080`.
### Environment variables
All flags have environment variable equivalents. CLI flags take precedence over env vars.
**collector** (runs on each nginx host, not in Docker):
| Env var | Flag | Default |
|--------------------------|-------------------|-------------|
| `COLLECTOR_LISTEN` | `-listen` | `:9090` |
| `COLLECTOR_PROM_LISTEN` | `-prom-listen` | `:9100` |
| `COLLECTOR_LOGS` | `-logs` | — |
| `COLLECTOR_LOGS_FILE` | `-logs-file` | — |
| `COLLECTOR_SOURCE` | `-source` | hostname |
| `COLLECTOR_V4PREFIX` | `-v4prefix` | `24` |
| `COLLECTOR_V6PREFIX` | `-v6prefix` | `48` |
| `COLLECTOR_SCAN_INTERVAL`| `-scan-interval` | `10s` |
**aggregator**:
| Env var | Flag | Default |
|--------------------------|---------------|-------------|
| `AGGREGATOR_LISTEN` | `-listen` | `:9091` |
| `AGGREGATOR_COLLECTORS` | `-collectors` | — (required)|
| `AGGREGATOR_SOURCE` | `-source` | hostname |
**frontend**:
| Env var | Flag | Default |
|------------------|------------|-------------------|
| `FRONTEND_LISTEN`| `-listen` | `:8080` |
| `FRONTEND_TARGET`| `-target` | `localhost:9091` |
| `FRONTEND_N` | `-n` | `25` |
| `FRONTEND_REFRESH`| `-refresh`| `30` |
---
DESIGN
## Directory Layout
```
nginx-logtail/
├── proto/
│ ├── logtail.proto # shared protobuf definitions
│ └── logtailpb/
│ ├── logtail.pb.go # generated: messages, enums
│ └── logtail_grpc.pb.go # generated: service stubs
├── internal/
│ └── store/
│ └── store.go # shared types: Tuple6, Entry, Snapshot, ring helpers
└── cmd/
├── collector/
│ ├── main.go
│ ├── tailer.go # MultiTailer: tail N files via one shared fsnotify watcher
│ ├── parser.go # tab-separated logtail log_format parser (~50 ns/line)
│ ├── store.go # bounded top-K in-memory store + tiered ring buffers
│ └── server.go # gRPC server: TopN, Trend, StreamSnapshots
├── aggregator/
│ ├── main.go
│ ├── subscriber.go # one goroutine per collector; StreamSnapshots with backoff
│ ├── merger.go # delta-merge: O(snapshot_size) per update
│ ├── cache.go # tick-based ring buffer cache served to clients
│ ├── registry.go # TargetRegistry: addr→name map updated from snapshot sources
│ └── server.go # gRPC server (same surface as collector)
├── frontend/
│ ├── main.go
│ ├── handler.go # URL param parsing, concurrent TopN+Trend, template exec
│ ├── filter.go # ParseFilterExpr / FilterExprString mini filter language
│ ├── client.go # gRPC dial helper
│ ├── sparkline.go # TrendPoints → inline SVG polyline
│ ├── format.go # fmtCount (space thousands separator)
│ └── templates/
│ ├── base.html # outer HTML shell, inline CSS, meta-refresh
│ └── index.html # window tabs, group-by tabs, breadcrumb, table, footer
└── cli/
├── main.go # subcommand dispatch and usage
├── flags.go # shared flags, parseTargets, buildFilter, parseWindow
├── client.go # gRPC dial helper
├── format.go # printTable, fmtCount, fmtTime, targetHeader
├── cmd_topn.go # topn: concurrent fan-out, table + JSON output
├── cmd_trend.go # trend: concurrent fan-out, table + JSON output
├── cmd_stream.go # stream: multiplexed streams, auto-reconnect
└── cmd_targets.go # targets: list collectors known to the endpoint
```
## Data Model
The core unit is a **count keyed by six dimensions**:
| Field | Description | Example |
|-------------------|------------------------------------------------------|-------------------|
| `website` | nginx `$host` | `www.example.com` |
| `client_prefix` | client IP truncated to /24 IPv4 or /48 IPv6 | `1.2.3.0/24` |
| `http_request_uri`| `$request_uri` path only — query string stripped | `/api/v1/search` |
| `http_response` | HTTP status code | `429` |
| `is_tor` | whether the client IP is a TOR exit node | `1` |
| `asn` | client AS number (MaxMind GeoIP2, 32-bit int) | `8298` |
## Time Windows & Tiered Ring Buffers
Two ring buffers at different resolutions cover all query windows up to 24 hours:
| Tier | Bucket size | Buckets | Top-K/bucket | Covers | Roll-up trigger |
|--------|-------------|---------|--------------|--------|---------------------|
| Fine | 1 min | 60 | 50 000 | 1 h | every minute |
| Coarse | 5 min | 288 | 5 000 | 24 h | every 5 fine ticks |
Supported query windows and which tier they read from:
| Window | Tier | Buckets summed |
|--------|--------|----------------|
| 1 min | fine | last 1 |
| 5 min | fine | last 5 |
| 15 min | fine | last 15 |
| 60 min | fine | all 60 |
| 6 h | coarse | last 72 |
| 24 h | coarse | all 288 |
Every minute: snapshot live map → top-50K → append to fine ring, reset live map.
Every 5 minutes: merge last 5 fine snapshots → top-5K → append to coarse ring.
## Memory Budget (Collector, target ≤ 1 GB)
Entry size: ~30 B website + ~15 B prefix + ~50 B URI + 3 B status + 1 B is_tor + 4 B asn + 8 B count + ~80 B Go map
overhead ≈ **~191 bytes per entry**.
| Structure | Entries | Size |
|-------------------------|-------------|-------------|
| Live map (capped) | 100 000 | ~19 MB |
| Fine ring (60 × 1-min) | 60 × 50 000 | ~558 MB |
| Coarse ring (288 × 5-min)| 288 × 5 000| ~268 MB |
| **Total** | | **~845 MB** |
The live map is **hard-capped at 100 K entries**. Once full, only updates to existing keys are
accepted; new keys are dropped until the next rotation resets the map. This keeps memory bounded
regardless of attack cardinality.
## Future Work — ClickHouse Export (post-MVP)
> **Do not implement until the end-to-end MVP is running.**
The aggregator will optionally write 1-minute pre-aggregated rows to ClickHouse for 7d/30d
historical views. Schema sketch:
```sql
CREATE TABLE logtail (
ts DateTime,
website LowCardinality(String),
client_prefix String,
request_uri LowCardinality(String),
status UInt16,
count UInt64
) ENGINE = SummingMergeTree(count)
PARTITION BY toYYYYMMDD(ts)
ORDER BY (ts, website, status, client_prefix, request_uri);
```
The frontend routes `window=7d|30d` queries to ClickHouse; all shorter windows continue to use
the in-memory cache. Kafka is not needed — the aggregator writes directly. This is purely additive
and does not change any existing interface.
## Protobuf API (`proto/logtail.proto`)
```protobuf
enum TorFilter { TOR_ANY = 0; TOR_YES = 1; TOR_NO = 2; }
enum StatusOp { EQ = 0; NE = 1; GT = 2; GE = 3; LT = 4; LE = 5; }
message Filter {
optional string website = 1;
optional string client_prefix = 2;
optional string http_request_uri = 3;
optional int32 http_response = 4;
StatusOp status_op = 5; // comparison operator for http_response
optional string website_regex = 6; // RE2 regex against website
optional string uri_regex = 7; // RE2 regex against http_request_uri
TorFilter tor = 8; // TOR_ANY (default) / TOR_YES / TOR_NO
optional int32 asn_number = 9; // filter by client ASN
StatusOp asn_op = 10; // comparison operator for asn_number
}
enum GroupBy { WEBSITE = 0; CLIENT_PREFIX = 1; REQUEST_URI = 2; HTTP_RESPONSE = 3; ASN_NUMBER = 4; }
enum Window { W1M = 0; W5M = 1; W15M = 2; W60M = 3; W6H = 4; W24H = 5; }
message TopNRequest { Filter filter = 1; GroupBy group_by = 2; int32 n = 3; Window window = 4; }
message TopNEntry { string label = 1; int64 count = 2; }
message TopNResponse { repeated TopNEntry entries = 1; string source = 2; }
// Trend: one total count per minute (or 5-min) bucket, for sparklines
message TrendRequest { Filter filter = 1; Window window = 4; }
message TrendPoint { int64 timestamp_unix = 1; int64 count = 2; }
message TrendResponse { repeated TrendPoint points = 1; string source = 2; }
// Streaming: collector pushes a fine snapshot after every minute rotation
message SnapshotRequest {}
message Snapshot {
string source = 1;
int64 timestamp = 2;
repeated TopNEntry entries = 3; // full top-50K for this bucket
bool is_coarse = 4; // true for 5-min coarse buckets (DumpSnapshots only)
}
// Target discovery: list the collectors behind the queried endpoint
message ListTargetsRequest {}
message TargetInfo {
string name = 1; // display name (--source value from the collector)
string addr = 2; // gRPC address; empty string means "this endpoint itself"
}
message ListTargetsResponse { repeated TargetInfo targets = 1; }
// Backfill: dump full ring buffer contents for aggregator restart recovery
message DumpSnapshotsRequest {}
// Response reuses Snapshot; is_coarse distinguishes fine (1-min) from coarse (5-min) buckets.
// Stream closes after all historical data is sent (unlike StreamSnapshots which stays open).
service LogtailService {
rpc TopN(TopNRequest) returns (TopNResponse);
rpc Trend(TrendRequest) returns (TrendResponse);
rpc StreamSnapshots(SnapshotRequest) returns (stream Snapshot);
rpc ListTargets(ListTargetsRequest) returns (ListTargetsResponse);
rpc DumpSnapshots(DumpSnapshotsRequest) returns (stream Snapshot);
}
// Both collector and aggregator implement LogtailService.
// The aggregator's StreamSnapshots re-streams the merged view.
// ListTargets: aggregator returns all configured collectors; collector returns itself.
// DumpSnapshots: collector only; aggregator calls this on startup to backfill its ring.
```
## Program 1 — Collector
### tailer.go
- **`MultiTailer`**: one shared `fsnotify.Watcher` for all files regardless of count — avoids
the inotify instance limit when tailing hundreds of files.
- On `WRITE` event: read all new lines from that file's `bufio.Reader`.
- On `RENAME`/`REMOVE` (logrotate): drain old fd to EOF, close, start retry-open goroutine with
exponential backoff. Sends the new `*os.File` back via a channel to keep map access single-threaded.
- Emits `LogRecord` structs on a shared buffered channel (capacity 200 K — absorbs ~20 s of peak).
- Accepts paths via `--logs` (comma-separated or glob) and `--logs-file` (one path/glob per line).
### parser.go
- Parses the fixed **logtail** nginx log format — tab-separated, fixed field order, no quoting:
```nginx
log_format logtail '$host\t$remote_addr\t$msec\t$request_method\t$request_uri\t$status\t$body_bytes_sent\t$request_time\t$is_tor\t$asn';
```
| # | Field | Used for |
|---|-------------------|------------------|
| 0 | `$host` | website |
| 1 | `$remote_addr` | client_prefix |
| 2 | `$msec` | (discarded) |
| 3 | `$request_method` | (discarded) |
| 4 | `$request_uri` | http_request_uri |
| 5 | `$status` | http_response |
| 6 | `$body_bytes_sent`| (discarded) |
| 7 | `$request_time` | (discarded) |
| 8 | `$is_tor` | is_tor |
| 9 | `$asn` | asn |
- `strings.SplitN(line, "\t", 10)` — ~50 ns/line. No regex.
- `$request_uri`: query string discarded at first `?`.
- `$remote_addr`: truncated to /24 (IPv4) or /48 (IPv6); prefix lengths configurable via flags.
- `$is_tor`: `1` if the client IP is a TOR exit node, `0` otherwise. Field is optional — lines
with exactly 8 fields (old format) are accepted and default to `is_tor=false`.
- `$asn`: client AS number as a decimal integer (from MaxMind GeoIP2). Field is optional —
lines without it default to `asn=0`.
- Lines with fewer than 8 fields are silently skipped.
### store.go
- **Single aggregator goroutine** reads from the channel and updates the live map — no locking on
the hot path. At 10 K lines/s the goroutine uses <1% CPU.
- Live map: `map[Tuple6]int64`, hard-capped at 100 K entries (new keys dropped when full).
- **Minute ticker**: heap-selects top-50K entries, writes snapshot to fine ring, resets live map.
- Every 5 fine ticks: merge last 5 fine snapshots → top-5K → write to coarse ring.
- **TopN query**: RLock ring, sum bucket range, apply filter, group by dimension, heap-select top N.
- **Trend query**: per-bucket filtered sum, returns one `TrendPoint` per bucket.
- **Subscriber fan-out**: per-subscriber buffered channel; `Subscribe`/`Unsubscribe` for streaming.
- **`DumpRings()`**: acquires `RLock`, copies both ring arrays and their head/filled pointers
(just slice headers — microseconds), releases lock, then returns chronologically-ordered fine
and coarse snapshot slices. The lock is never held during serialisation or network I/O.
### server.go
- gRPC server on configurable port (default `:9090`).
- `TopN` and `Trend`: unary, answered from the ring buffer under RLock.
- `StreamSnapshots`: registers a subscriber channel; loops `Recv` on it; 30 s keepalive ticker.
- `DumpSnapshots`: calls `DumpRings()`, streams all fine buckets (`is_coarse=false`) then all
coarse buckets (`is_coarse=true`), then closes the stream. No lock held during streaming.
## Program 2 — Aggregator
### subscriber.go
- One goroutine per collector. Dials, calls `StreamSnapshots`, forwards each `Snapshot` to the
merger.
- Reconnects with exponential backoff (100 ms → doubles → cap 30 s).
- After 3 consecutive failures: calls `merger.Zero(addr)` to remove that collector's contribution
from the merged view (prevents stale counts from accumulating during outages).
- Resets failure count on first successful `Recv`; logs recovery.
### merger.go
- **Delta strategy**: on each new snapshot from collector X, subtract X's previous entries from
`merged`, add the new entries, store new map. O(snapshot_size) per update — not
O(N_collectors × snapshot_size).
- `Zero(addr)`: subtracts the collector's last-known contribution and deletes its entry — called
when a collector is marked degraded.
### cache.go
- **Tick-based rotation** (1-min ticker, not snapshot-triggered): keeps the aggregator ring aligned
to the same 1-minute cadence as collectors regardless of how many collectors are connected.
- Same tiered ring structure as the collector store; populated from `merger.TopK()` each tick.
- `QueryTopN`, `QueryTrend`, `Subscribe`/`Unsubscribe` — identical interface to collector store.
- **`LoadHistorical(fine, coarse []Snapshot)`**: writes pre-merged backfill snapshots directly into
the ring arrays under `mu.Lock()`, sets head and filled counters, then returns. Safe to call
concurrently with queries. The live ticker continues from the updated head after this returns.
### backfill.go
- **`Backfill(ctx, collectorAddrs, cache)`**: called once at aggregator startup (in a goroutine,
after the gRPC server is already listening so the frontend is never blocked).
- Dials all collectors concurrently and calls `DumpSnapshots` on each.
- Accumulates entries per timestamp in `map[unix-second]map[label]count`; multiple collectors'
contributions for the same bucket are summed — the same delta-merge semantics as the live path.
- Sorts timestamps chronologically, runs `TopKFromMap` per bucket, caps to ring size.
- Calls `cache.LoadHistorical` once with the merged results.
- **Graceful degradation**: if a collector returns `Unimplemented` (old binary without
`DumpSnapshots`), logs an informational message and skips it — live streaming still starts
normally. Any other error is logged (together with the elapsed wall-clock time) and that
collector is likewise skipped. Partial backfill (some collectors succeed, some fail) is supported.
- Logs per-collector stats: bucket counts, total entry counts, and wall-clock duration.
### registry.go
- **`TargetRegistry`**: `sync.RWMutex`-protected `map[addr → name]`. Initialised with the
configured collector addresses; display names are updated from the `source` field of the first
snapshot received from each collector.
- `Targets()` returns a stable sorted slice of `{name, addr}` pairs for `ListTargets` responses.
### server.go
- Implements `LogtailService` backed by the cache (not live fan-out).
- `StreamSnapshots` re-streams merged fine snapshots; usable by a second-tier aggregator or
monitoring system.
- `ListTargets` returns the current `TargetRegistry` contents — all configured collectors with
their display names and gRPC addresses.
## Program 3 — Frontend
### handler.go
- All filter state in the **URL query string**: `w` (window), `by` (group_by), `f_website`,
`f_prefix`, `f_uri`, `f_status`, `f_website_re`, `f_uri_re`, `f_is_tor`, `f_asn`, `n`, `target`. No
server-side session — URLs are shareable and bookmarkable; multiple operators see independent views.
- **Filter expression box**: a `q=` parameter carries a mini filter language
(`status>=400 AND website~=gouda.* AND uri~=^/api/`). On submission the handler parses it
via `ParseFilterExpr` and redirects to the canonical URL with individual `f_*` params; `q=`
never appears in the final URL. Parse errors re-render the current page with an inline message.
- **Status expressions**: `f_status` accepts `200`, `!=200`, `>=400`, `<500`, etc. — parsed by
`store.ParseStatusExpr` into `(value, StatusOp)` for the filter protobuf.
- **ASN expressions**: `f_asn` accepts the same expression syntax (`12345`, `!=65000`, `>=1000`,
`<64512`, etc.) — also parsed by `store.ParseStatusExpr`, stored as `(asn_number, AsnOp)` in the
filter protobuf.
- **Regex filters**: `f_website_re` and `f_uri_re` hold RE2 patterns; compiled once per request
into `store.CompiledFilter` before the query-loop iteration. Invalid regexes match nothing.
- `TopN`, `Trend`, and `ListTargets` RPCs issued **concurrently** (all with a 5 s deadline); page
renders with whatever completes. Trend failure suppresses the sparkline; `ListTargets` failure
hides the source picker — both are non-fatal.
- **Source picker**: `ListTargets` result drives a `source:` tab row. Clicking a collector tab
sets `target=` to that collector's address, querying it directly. The "all" tab resets to the
default aggregator. Picker is hidden when `ListTargets` returns no collectors (direct collector
mode).
- **Drilldown**: clicking a table row adds the current dimension's filter and advances `by` through
`website → prefix → uri → status → asn → website` (cycles).
- **`raw=1`**: returns the TopN result as JSON — same URL, no CLI needed for scripting.
- **`target=` override**: per-request gRPC endpoint override for comparing sources.
- Error pages render at HTTP 502 with the window/group-by tabs still functional.
### sparkline.go
- `renderSparkline([]*pb.TrendPoint) template.HTML` — fixed `viewBox="0 0 300 60"` SVG,
Y-scaled to max count, rendered as `<polyline>`. Returns `""` for fewer than 2 points or
all-zero data.
### templates/
- `base.html`: outer shell, inline CSS (~40 lines), conditional `<meta http-equiv="refresh">`.
- `index.html`: window tabs, group-by tabs, filter breadcrumb with `×` remove links, sparkline,
TopN table with `<meter>` bars (% relative to rank-1), footer with source and refresh info.
- No external CSS, no web fonts, no JavaScript. Renders in w3m/lynx.
## Program 4 — CLI
### Subcommands
```
logtail-cli topn [flags] ranked label → count table (exits after one response)
logtail-cli trend [flags] per-bucket time series (exits after one response)
logtail-cli stream [flags] live snapshot feed (runs until Ctrl-C, auto-reconnects)
logtail-cli targets [flags] list targets known to the queried endpoint
```
### Flags
**Shared** (all subcommands):
| Flag | Default | Description |
|---------------|------------------|----------------------------------------------------------|
| `--target` | `localhost:9090` | Comma-separated `host:port` list; fan-out to all |
| `--json` | false | Emit newline-delimited JSON instead of a table |
| `--website` | — | Filter: website |
| `--prefix` | — | Filter: client prefix |
| `--uri` | — | Filter: request URI |
| `--status` | — | Filter: HTTP status expression (`200`, `!=200`, `>=400`, `<500`, …) |
| `--website-re`| — | Filter: RE2 regex against website |
| `--uri-re` | — | Filter: RE2 regex against request URI |
| `--is-tor` | — | Filter: TOR traffic (`1` or `!=0` = TOR only; `0` or `!=1` = non-TOR only) |
| `--asn` | — | Filter: ASN expression (`12345`, `!=65000`, `>=1000`, `<64512`, …) |
**`topn` only**: `--n 10`, `--window 5m`, `--group-by website`
**`trend` only**: `--window 5m`
### Multi-target fan-out
`--target` accepts a comma-separated list. All targets are queried concurrently; results are
printed in order with a per-target header. Single-target output omits the header for clean
pipe-to-`jq` use.
### Output
Default: human-readable table with space-separated thousands (`18 432`).
`--json`: a single JSON array (one object per target) for `topn` and `trend`; NDJSON for `stream` (unbounded).
`stream` reconnects automatically on error (5 s backoff). All other subcommands exit immediately
with a non-zero code on gRPC error.
## Key Design Decisions
| Decision | Rationale |
|----------|-----------|
| Single aggregator goroutine in collector | Eliminates all map lock contention on the 10 K/s hot path |
| Hard cap live map at 100 K entries | Bounds memory regardless of DDoS cardinality explosion |
| Ring buffer of sorted snapshots (not raw maps) | TopN queries avoid re-sorting; merge is a single heap pass |
| Push-based streaming (collector → aggregator) | Aggregator cache always fresh; query latency is cache-read only |
| Delta merge in aggregator | O(snapshot_size) per update, not O(N_collectors × size) |
| Tick-based cache rotation in aggregator | Ring stays on the same 1-min cadence regardless of collector count |
| Degraded collector zeroing | Stale counts from failed collectors don't accumulate in the merged view |
| Same `LogtailService` for collector and aggregator | CLI and frontend work with either; no special-casing |
| `internal/store` shared package | ring-buffer, `Tuple6` encoding, and filter logic shared between collector and aggregator |
| Filter state in URL, not session cookie | Multiple concurrent operators; shareable/bookmarkable URLs |
| Query strings stripped at ingest | Major cardinality reduction; prevents URI explosion under attack |
| No persistent storage | Simplicity; acceptable for ops dashboards (restart = lose history) |
| Trusted internal network, no TLS | Reduces operational complexity; add a TLS proxy if needed later |
| Server-side SVG sparklines, meta-refresh | Zero JS dependencies; works in terminal browsers and curl |
| CLI default: human-readable table | Operator-friendly by default; `--json` opt-in for scripting |
| CLI multi-target fan-out | Compare a collector vs. aggregator, or two collectors, in one command |
| CLI uses stdlib `flag`, no framework | Four subcommands don't justify a dependency |
| Status filter as expression string (`!=200`, `>=400`) | Operator-friendly; parsed once at query boundary, encoded as `(int32, StatusOp)` in proto |
| ASN filter reuses `StatusOp` and `ParseStatusExpr` | Same 6-operator grammar as status; no duplicate enum or parser needed |
| Regex filters compiled once per query (`CompiledFilter`) | Up to 288 × 5 000 per-entry calls — compiling per-entry would dominate query latency |
| Filter expression box (`q=`) redirects to canonical URL | Filter state stays in individual `f_*` params; URLs remain shareable and bookmarkable |
| `ListTargets` + frontend source picker | "Which nginx is busiest?" answered by switching `target=` to a collector; no data model changes, no extra memory |
| Backfill via `DumpSnapshots` on restart | Aggregator recovers full 24h ring from collectors on restart; gRPC server starts first so frontend is never blocked during backfill |
| `DumpRings()` copies under lock, streams without lock | Lock held for microseconds (slice-header copy only); network I/O happens outside the lock so minute rotation is never delayed |
| Backfill merges per-timestamp across collectors | Correct cross-collector sums per bucket, same semantics as live delta-merge; collectors that don't support `DumpSnapshots` are skipped gracefully |

View File

@@ -27,7 +27,7 @@ Add the `logtail` log format to your `nginx.conf` and apply it to each `server`
```nginx ```nginx
http { http {
log_format logtail '$host\t$remote_addr\t$msec\t$request_method\t$request_uri\t$status\t$body_bytes_sent\t$request_time'; log_format logtail '$host\t$remote_addr\t$msec\t$request_method\t$request_uri\t$status\t$body_bytes_sent\t$request_time\t$is_tor\t$asn';
server { server {
access_log /var/log/nginx/access.log logtail; access_log /var/log/nginx/access.log logtail;
@@ -40,6 +40,15 @@ http {
The format is tab-separated with fixed field positions. Query strings are stripped from the URI The format is tab-separated with fixed field positions. Query strings are stripped from the URI
by the collector at ingest time — only the path is tracked. by the collector at ingest time — only the path is tracked.
`$is_tor` must be set to `1` when the client IP is a TOR exit node and `0` otherwise (typically
populated by a custom nginx variable or a Lua script that checks the IP against a TOR exit list).
The field is optional for backward compatibility — log lines without it are accepted and treated
as `is_tor=0`.
`$asn` must be set to the client's AS number as a decimal integer (e.g. from MaxMind GeoIP2's
`$geoip2_data_autonomous_system_number`). The field is optional — log lines without it default
to `asn=0`.
--- ---
## Building ## Building
@@ -64,14 +73,16 @@ windows, and exposes a gRPC interface for the aggregator (and directly for the C
### Flags ### Flags
| Flag | Default | Description | | Flag | Default | Description |
|----------------|--------------|-----------------------------------------------------------| |-------------------|--------------|-----------------------------------------------------------|
| `--listen` | `:9090` | gRPC listen address | | `--listen` | `:9090` | gRPC listen address |
| `--logs` | — | Comma-separated log file paths or glob patterns | | `--prom-listen` | `:9100` | Prometheus metrics address; empty string to disable |
| `--logs-file` | — | File containing one log path/glob per line | | `--logs` | — | Comma-separated log file paths or glob patterns |
| `--source` | hostname | Name for this collector in query responses | | `--logs-file` | — | File containing one log path/glob per line |
| `--v4prefix` | `24` | IPv4 prefix length for client bucketing (e.g. /24 → /23) | | `--source` | hostname | Name for this collector in query responses |
| `--v6prefix` | `48` | IPv6 prefix length for client bucketing | | `--v4prefix` | `24` | IPv4 prefix length for client bucketing (e.g. /24 → /23) |
| `--v6prefix` | `48` | IPv6 prefix length for client bucketing |
| `--scan-interval` | `10s` | How often to rescan glob patterns for new/removed files |
At least one of `--logs` or `--logs-file` is required. At least one of `--logs` or `--logs-file` is required.
@@ -113,6 +124,73 @@ The collector handles logrotate automatically. On `RENAME`/`REMOVE` events it dr
descriptor to EOF (so no lines are lost), then retries opening the original path with backoff until descriptor to EOF (so no lines are lost), then retries opening the original path with backoff until
the new file appears. No restart or SIGHUP required. the new file appears. No restart or SIGHUP required.
### Prometheus metrics
The collector exposes a Prometheus-compatible `/metrics` endpoint on `--prom-listen` (default
`:9100`). Set `--prom-listen ""` to disable it entirely.
Three metrics are exported:
**`nginx_http_requests_total`** — counter, labeled `{host, method, status}`:
```
nginx_http_requests_total{host="example.com",method="GET",status="200"} 18432
nginx_http_requests_total{host="example.com",method="POST",status="201"} 304
nginx_http_requests_total{host="api.example.com",method="GET",status="429"} 57
```
**`nginx_http_response_body_bytes`** — histogram, labeled `{host}`. Observes the
`$body_bytes_sent` value for every request. Bucket upper bounds (bytes):
`256, 1024, 4096, 16384, 65536, 262144, 1048576, +Inf`.
**`nginx_http_request_duration_seconds`** — histogram, labeled `{host}`. Observes the
`$request_time` value for every request. Bucket upper bounds (seconds):
`0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, +Inf`.
Body and request-time histograms use only the `host` label (not method/status) to keep
cardinality bounded — the label sets stay proportional to the number of virtual hosts, not
the number of unique method × status combinations.
The counter map is capped at 100 000 distinct `{host, method, status}` tuples. Entries beyond
the cap are silently dropped for the current scrape interval, so memory is bounded regardless
of traffic patterns.
**Prometheus scrape config:**
```yaml
scrape_configs:
- job_name: nginx_logtail
static_configs:
- targets:
- nginx1:9100
- nginx2:9100
- nginx3:9100
```
Or with service discovery — the collector has no special requirements beyond a reachable
TCP port.
**Example queries:**
```promql
# Request rate per host over last 5 minutes
rate(nginx_http_requests_total[5m])
# 5xx error rate fraction per host
sum by (host) (rate(nginx_http_requests_total{status=~"5.."}[5m]))
/
sum by (host) (rate(nginx_http_requests_total[5m]))
# 95th percentile response time per host
histogram_quantile(0.95,
sum by (host, le) (rate(nginx_http_request_duration_seconds_bucket[5m]))
)
# Median response body size per host
histogram_quantile(0.50,
sum by (host, le) (rate(nginx_http_response_body_bytes_bucket[5m]))
)
```
### Memory usage ### Memory usage
The collector is designed to stay well under 1 GB: The collector is designed to stay well under 1 GB:
@@ -124,7 +202,7 @@ The collector is designed to stay well under 1 GB:
| Coarse ring (288 × 5-min) | 288 × 5 000 | ~268 MB | | Coarse ring (288 × 5-min) | 288 × 5 000 | ~268 MB |
| **Total** | | **~845 MB** | | **Total** | | **~845 MB** |
When the live map reaches 100 000 distinct 4-tuples, new keys are dropped for the rest of that When the live map reaches 100 000 distinct 6-tuples, new keys are dropped for the rest of that
minute. Existing keys continue to accumulate counts. The cap resets at each minute rotation. minute. Existing keys continue to accumulate counts. The cap resets at each minute rotation.
### Time windows ### Time windows
@@ -248,13 +326,13 @@ the selected dimension and time window.
**Window tabs** — switch between `1m / 5m / 15m / 60m / 6h / 24h`. Only the window changes; **Window tabs** — switch between `1m / 5m / 15m / 60m / 6h / 24h`. Only the window changes;
all active filters are preserved. all active filters are preserved.
**Dimension tabs** — switch between grouping by `website / prefix / uri / status`. **Dimension tabs** — switch between grouping by `website / asn / prefix / status / uri`.
**Drilldown** — click any table row to add that value as a filter and advance to the next **Drilldown** — click any table row to add that value as a filter and advance to the next
dimension in the hierarchy: dimension in the hierarchy:
``` ```
website → client prefix → request URI → HTTP status → website (cycles) website → client prefix → request URI → HTTP status → ASN → website (cycles)
``` ```
Example: click `example.com` in the website view to see which client prefixes are hitting it; Example: click `example.com` in the website view to see which client prefixes are hitting it;
@@ -278,12 +356,20 @@ website=example.com AND prefix=1.2.3.0/24
Supported fields and operators: Supported fields and operators:
| Field | Operators | Example | | Field | Operators | Example |
|-----------|---------------------|----------------------------| |-----------|---------------------|-----------------------------------|
| `status` | `=` `!=` `>` `>=` `<` `<=` | `status>=400` | | `status` | `=` `!=` `>` `>=` `<` `<=` | `status>=400` |
| `website` | `=` `~=` | `website~=gouda.*` | | `website` | `=` `~=` | `website~=gouda.*` |
| `uri` | `=` `~=` | `uri~=^/api/` | | `uri` | `=` `~=` | `uri~=^/api/` |
| `prefix` | `=` | `prefix=1.2.3.0/24` | | `prefix` | `=` | `prefix=1.2.3.0/24` |
| `is_tor` | `=` `!=` | `is_tor=1`, `is_tor!=0` |
| `asn` | `=` `!=` `>` `>=` `<` `<=` | `asn=8298`, `asn>=1000` |
`is_tor=1` and `is_tor!=0` are equivalent (TOR traffic only). `is_tor=0` and `is_tor!=1` are
equivalent (non-TOR traffic only).
`asn` accepts the same comparison expressions as `status`. Use `asn=8298` to match a single AS,
`asn>=64512` to match the private-use ASN range, or `asn!=0` to exclude unresolved entries.
`~=` means RE2 regex match. Values with spaces or quotes may be wrapped in double or single `~=` means RE2 regex match. Values with spaces or quotes may be wrapped in double or single
quotes: `uri~="^/search\?q="`. quotes: `uri~="^/search\?q="`.
@@ -303,8 +389,8 @@ accept RE2 regular expressions. The breadcrumb strip shows them as `website~=gou
`uri~=^/api/` with the usual `×` remove link. `uri~=^/api/` with the usual `×` remove link.
**URL sharing** — all filter state is in the URL query string (`w`, `by`, `f_website`, **URL sharing** — all filter state is in the URL query string (`w`, `by`, `f_website`,
`f_prefix`, `f_uri`, `f_status`, `f_website_re`, `f_uri_re`, `n`). Copy the URL to share an `f_prefix`, `f_uri`, `f_status`, `f_website_re`, `f_uri_re`, `f_is_tor`, `f_asn`, `n`). Copy
exact view with another operator, or bookmark a recurring query. the URL to share an exact view with another operator, or bookmark a recurring query.
**JSON output** — append `&raw=1` to any URL to receive the TopN result as JSON instead of **JSON output** — append `&raw=1` to any URL to receive the TopN result as JSON instead of
HTML. Useful for scripting without the CLI binary: HTML. Useful for scripting without the CLI binary:
@@ -359,6 +445,8 @@ logtail-cli targets [flags] list targets known to the queried endpoint
| `--status` | — | Filter: HTTP status expression (`200`, `!=200`, `>=400`, `<500`, …) | | `--status` | — | Filter: HTTP status expression (`200`, `!=200`, `>=400`, `<500`, …) |
| `--website-re`| — | Filter: RE2 regex against website | | `--website-re`| — | Filter: RE2 regex against website |
| `--uri-re` | — | Filter: RE2 regex against request URI | | `--uri-re` | — | Filter: RE2 regex against request URI |
| `--is-tor` | — | Filter: `1` or `!=0` = TOR only; `0` or `!=1` = non-TOR only |
| `--asn` | — | Filter: ASN expression (`12345`, `!=65000`, `>=1000`, `<64512`, …) |
### `topn` flags ### `topn` flags
@@ -366,7 +454,7 @@ logtail-cli targets [flags] list targets known to the queried endpoint
|---------------|------------|----------------------------------------------------------| |---------------|------------|----------------------------------------------------------|
| `--n` | `10` | Number of entries | | `--n` | `10` | Number of entries |
| `--window` | `5m` | `1m` `5m` `15m` `60m` `6h` `24h` | | `--window` | `5m` | `1m` `5m` `15m` `60m` `6h` `24h` |
| `--group-by` | `website` | `website` `prefix` `uri` `status` | | `--group-by` | `website` | `website` `prefix` `uri` `status` `asn` |
### `trend` flags ### `trend` flags
@@ -394,9 +482,9 @@ RANK COUNT LABEL
1 18 432 example.com 1 18 432 example.com
``` ```
**JSON** (`--json`) — one object per target, suitable for `jq`: **JSON** (`--json`) — a single JSON array with one object per target, suitable for `jq`:
```json ```json
{"source":"agg-prod","target":"agg:9091","entries":[{"label":"example.com","count":18432},...]} [{"source":"agg-prod","target":"agg:9091","entries":[{"label":"example.com","count":18432},...]}]
``` ```
**`stream` JSON** — one object per snapshot received (NDJSON), runs until interrupted: **`stream` JSON** — one object per snapshot received (NDJSON), runs until interrupted:
@@ -438,7 +526,7 @@ logtail-cli topn --target agg:9091 --window 1m --group-by prefix --status 429 --
# Same query, pipe to jq for scripting # Same query, pipe to jq for scripting
logtail-cli topn --target agg:9091 --window 1m --group-by prefix --status 429 --n 20 \ logtail-cli topn --target agg:9091 --window 1m --group-by prefix --status 429 --n 20 \
--json | jq '.entries[0]' --json | jq '.[0].entries[0]'
# Which website has the most errors (4xx or 5xx) over the last 24h? # Which website has the most errors (4xx or 5xx) over the last 24h?
logtail-cli topn --target agg:9091 --window 24h --group-by website --status '>=400' logtail-cli topn --target agg:9091 --window 24h --group-by website --status '>=400'
@@ -455,6 +543,27 @@ logtail-cli topn --target agg:9091 --window 5m --website-re 'gouda.*'
# Filter by URI regex: all /api/ paths # Filter by URI regex: all /api/ paths
logtail-cli topn --target agg:9091 --window 5m --group-by uri --uri-re '^/api/' logtail-cli topn --target agg:9091 --window 5m --group-by uri --uri-re '^/api/'
# Show only TOR traffic — which websites are TOR clients hitting?
logtail-cli topn --target agg:9091 --window 5m --is-tor 1
# Show non-TOR traffic only — exclude exit nodes from the view
logtail-cli topn --target agg:9091 --window 5m --is-tor 0
# Top ASNs by request count over the last 5 minutes
logtail-cli topn --target agg:9091 --window 5m --group-by asn
# Which ASNs are generating the most 429s?
logtail-cli topn --target agg:9091 --window 5m --group-by asn --status 429
# Filter to traffic from a specific ASN
logtail-cli topn --target agg:9091 --window 5m --asn 8298
# Filter to traffic from private-use / unallocated ASNs
logtail-cli topn --target agg:9091 --window 5m --group-by prefix --asn '>=64512'
# Exclude unresolved entries (ASN 0) and show top source ASNs
logtail-cli topn --target agg:9091 --window 5m --group-by asn --asn '!=0'
# Compare two collectors side by side in one command # Compare two collectors side by side in one command
logtail-cli topn --target nginx1:9090,nginx2:9090 --window 5m logtail-cli topn --target nginx1:9090,nginx2:9090 --window 5m
@@ -462,7 +571,7 @@ logtail-cli topn --target nginx1:9090,nginx2:9090 --window 5m
logtail-cli topn --target nginx3:9090,agg:9091 --window 5m --group-by prefix logtail-cli topn --target nginx3:9090,agg:9091 --window 5m --group-by prefix
# Trend of total traffic over 6h (for a quick sparkline in the terminal) # Trend of total traffic over 6h (for a quick sparkline in the terminal)
logtail-cli trend --target agg:9091 --window 6h --json | jq '[.points[] | .count]' logtail-cli trend --target agg:9091 --window 6h --json | jq '.[0].points | [.[].count]'
# Watch live merged snapshots from the aggregator # Watch live merged snapshots from the aggregator
logtail-cli stream --target agg:9091 logtail-cli stream --target agg:9091

BIN
docs/frontend.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 300 KiB

View File

@@ -6,6 +6,7 @@ import (
"container/heap" "container/heap"
"log" "log"
"regexp" "regexp"
"strconv"
"time" "time"
pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb" pb "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"
@@ -13,19 +14,21 @@ import (
// Ring-buffer dimensions — shared between collector and aggregator. // Ring-buffer dimensions — shared between collector and aggregator.
const ( const (
FineRingSize = 60 // 60 × 1-min buckets → 1 hour FineRingSize = 60 // 60 × 1-min buckets → 1 hour
CoarseRingSize = 288 // 288 × 5-min buckets → 24 hours CoarseRingSize = 288 // 288 × 5-min buckets → 24 hours
FineTopK = 50_000 // entries kept per fine snapshot FineTopK = 50_000 // entries kept per fine snapshot
CoarseTopK = 5_000 // entries kept per coarse snapshot CoarseTopK = 5_000 // entries kept per coarse snapshot
CoarseEvery = 5 // fine ticks between coarse writes CoarseEvery = 5 // fine ticks between coarse writes
) )
// Tuple4 is the four-dimensional aggregation key. // Tuple6 is the aggregation key (website, prefix, URI, status, is_tor, asn).
type Tuple4 struct { type Tuple6 struct {
Website string Website string
Prefix string Prefix string
URI string URI string
Status string Status string
IsTor bool
ASN int32
} }
// Entry is a labelled count used in snapshots and query results. // Entry is a labelled count used in snapshots and query results.
@@ -73,21 +76,34 @@ func BucketsForWindow(window pb.Window, fine, coarse RingView, fineFilled, coars
} }
} }
// --- label encoding: "website\x00prefix\x00uri\x00status" --- // --- label encoding: "website\x00prefix\x00uri\x00status\x00is_tor\x00asn" ---
// EncodeTuple encodes a Tuple4 as a NUL-separated string suitable for use // EncodeTuple encodes a Tuple6 as a NUL-separated string suitable for use
// as a map key in snapshots. // as a map key in snapshots.
func EncodeTuple(t Tuple4) string { func EncodeTuple(t Tuple6) string {
return t.Website + "\x00" + t.Prefix + "\x00" + t.URI + "\x00" + t.Status tor := "0"
if t.IsTor {
tor = "1"
}
return t.Website + "\x00" + t.Prefix + "\x00" + t.URI + "\x00" + t.Status + "\x00" + tor + "\x00" + strconv.Itoa(int(t.ASN))
} }
// LabelTuple decodes a NUL-separated snapshot label back into a Tuple4. // LabelTuple decodes a NUL-separated snapshot label back into a Tuple6.
func LabelTuple(label string) Tuple4 { func LabelTuple(label string) Tuple6 {
parts := splitN(label, '\x00', 4) parts := splitN(label, '\x00', 6)
if len(parts) != 4 { if len(parts) < 4 {
return Tuple4{} return Tuple6{}
} }
return Tuple4{parts[0], parts[1], parts[2], parts[3]} t := Tuple6{Website: parts[0], Prefix: parts[1], URI: parts[2], Status: parts[3]}
if len(parts) >= 5 {
t.IsTor = parts[4] == "1"
}
if len(parts) == 6 {
if n, err := strconv.Atoi(parts[5]); err == nil {
t.ASN = int32(n)
}
}
return t
} }
func splitN(s string, sep byte, n int) []string { func splitN(s string, sep byte, n int) []string {
@@ -117,9 +133,11 @@ func indexOf(s string, b byte) int {
// CompiledFilter wraps a pb.Filter with pre-compiled regular expressions. // CompiledFilter wraps a pb.Filter with pre-compiled regular expressions.
// Use CompileFilter to construct one before a query loop. // Use CompileFilter to construct one before a query loop.
type CompiledFilter struct { type CompiledFilter struct {
Proto *pb.Filter Proto *pb.Filter
WebsiteRe *regexp.Regexp // nil if no website_regex or compilation failed WebsiteRe *regexp.Regexp // nil if no website_regex or compilation failed
URIRe *regexp.Regexp // nil if no uri_regex or compilation failed URIRe *regexp.Regexp // nil if no uri_regex or compilation failed
WebsiteReExcl *regexp.Regexp // nil if no website_regex_exclude or compilation failed
URIReExcl *regexp.Regexp // nil if no uri_regex_exclude or compilation failed
} }
// CompileFilter compiles the regex fields in f once. Invalid regexes are // CompileFilter compiles the regex fields in f once. Invalid regexes are
@@ -145,12 +163,28 @@ func CompileFilter(f *pb.Filter) *CompiledFilter {
cf.URIRe = re cf.URIRe = re
} }
} }
if f.WebsiteRegexExclude != nil {
re, err := regexp.Compile(f.GetWebsiteRegexExclude())
if err != nil {
log.Printf("store: invalid website_regex_exclude %q: %v", f.GetWebsiteRegexExclude(), err)
} else {
cf.WebsiteReExcl = re
}
}
if f.UriRegexExclude != nil {
re, err := regexp.Compile(f.GetUriRegexExclude())
if err != nil {
log.Printf("store: invalid uri_regex_exclude %q: %v", f.GetUriRegexExclude(), err)
} else {
cf.URIReExcl = re
}
}
return cf return cf
} }
// MatchesFilter returns true if t satisfies all constraints in f. // MatchesFilter returns true if t satisfies all constraints in f.
// A nil filter matches everything. // A nil filter matches everything.
func MatchesFilter(t Tuple4, f *CompiledFilter) bool { func MatchesFilter(t Tuple6, f *CompiledFilter) bool {
if f == nil || f.Proto == nil { if f == nil || f.Proto == nil {
return true return true
} }
@@ -177,12 +211,55 @@ func MatchesFilter(t Tuple4, f *CompiledFilter) bool {
if p.UriRegex != nil && f.URIRe == nil { if p.UriRegex != nil && f.URIRe == nil {
return false return false
} }
if f.WebsiteReExcl != nil && f.WebsiteReExcl.MatchString(t.Website) {
return false
}
if p.WebsiteRegexExclude != nil && f.WebsiteReExcl == nil {
return false
}
if f.URIReExcl != nil && f.URIReExcl.MatchString(t.URI) {
return false
}
if p.UriRegexExclude != nil && f.URIReExcl == nil {
return false
}
if p.HttpResponse != nil && !matchesStatusOp(t.Status, p.GetHttpResponse(), p.StatusOp) { if p.HttpResponse != nil && !matchesStatusOp(t.Status, p.GetHttpResponse(), p.StatusOp) {
return false return false
} }
switch p.Tor {
case pb.TorFilter_TOR_YES:
if !t.IsTor {
return false
}
case pb.TorFilter_TOR_NO:
if t.IsTor {
return false
}
}
if p.AsnNumber != nil && !matchesAsnOp(t.ASN, p.GetAsnNumber(), p.AsnOp) {
return false
}
return true return true
} }
// matchesAsnOp applies op(asn, want) directly on int32 values.
func matchesAsnOp(asn, want int32, op pb.StatusOp) bool {
switch op {
case pb.StatusOp_NE:
return asn != want
case pb.StatusOp_GT:
return asn > want
case pb.StatusOp_GE:
return asn >= want
case pb.StatusOp_LT:
return asn < want
case pb.StatusOp_LE:
return asn <= want
default: // EQ
return asn == want
}
}
// matchesStatusOp applies op(statusStr, want), parsing statusStr as an integer. // matchesStatusOp applies op(statusStr, want), parsing statusStr as an integer.
// Returns false if statusStr is not a valid integer. // Returns false if statusStr is not a valid integer.
func matchesStatusOp(statusStr string, want int32, op pb.StatusOp) bool { func matchesStatusOp(statusStr string, want int32, op pb.StatusOp) bool {
@@ -210,7 +287,7 @@ func matchesStatusOp(statusStr string, want int32, op pb.StatusOp) bool {
} }
// DimensionLabel returns the string value of t for the given group-by dimension. // DimensionLabel returns the string value of t for the given group-by dimension.
func DimensionLabel(t Tuple4, g pb.GroupBy) string { func DimensionLabel(t Tuple6, g pb.GroupBy) string {
switch g { switch g {
case pb.GroupBy_WEBSITE: case pb.GroupBy_WEBSITE:
return t.Website return t.Website
@@ -220,6 +297,8 @@ func DimensionLabel(t Tuple4, g pb.GroupBy) string {
return t.URI return t.URI
case pb.GroupBy_HTTP_RESPONSE: case pb.GroupBy_HTTP_RESPONSE:
return t.Status return t.Status
case pb.GroupBy_ASN_NUMBER:
return strconv.Itoa(int(t.ASN))
default: default:
return t.Website return t.Website
} }
@@ -299,9 +378,9 @@ func TopKFromMap(m map[string]int64, k int) []Entry {
return result return result
} }
// TopKFromTupleMap encodes a Tuple4 map and returns the top-k as a Snapshot. // TopKFromTupleMap encodes a Tuple6 map and returns the top-k as a Snapshot.
// Used by the collector to snapshot its live map. // Used by the collector to snapshot its live map.
func TopKFromTupleMap(m map[Tuple4]int64, k int, ts time.Time) Snapshot { func TopKFromTupleMap(m map[Tuple6]int64, k int, ts time.Time) Snapshot {
flat := make(map[string]int64, len(m)) flat := make(map[string]int64, len(m))
for t, c := range m { for t, c := range m {
flat[EncodeTuple(t)] = c flat[EncodeTuple(t)] = c

View File

@@ -83,10 +83,10 @@ func compiledEQ(status int32) *CompiledFilter {
} }
func TestMatchesFilterNil(t *testing.T) { func TestMatchesFilterNil(t *testing.T) {
if !MatchesFilter(Tuple4{Website: "x"}, nil) { if !MatchesFilter(Tuple6{Website: "x"}, nil) {
t.Fatal("nil filter should match everything") t.Fatal("nil filter should match everything")
} }
if !MatchesFilter(Tuple4{Website: "x"}, &CompiledFilter{}) { if !MatchesFilter(Tuple6{Website: "x"}, &CompiledFilter{}) {
t.Fatal("empty compiled filter should match everything") t.Fatal("empty compiled filter should match everything")
} }
} }
@@ -94,10 +94,10 @@ func TestMatchesFilterNil(t *testing.T) {
func TestMatchesFilterExactWebsite(t *testing.T) { func TestMatchesFilterExactWebsite(t *testing.T) {
w := "example.com" w := "example.com"
cf := CompileFilter(&pb.Filter{Website: &w}) cf := CompileFilter(&pb.Filter{Website: &w})
if !MatchesFilter(Tuple4{Website: "example.com"}, cf) { if !MatchesFilter(Tuple6{Website: "example.com"}, cf) {
t.Fatal("expected match") t.Fatal("expected match")
} }
if MatchesFilter(Tuple4{Website: "other.com"}, cf) { if MatchesFilter(Tuple6{Website: "other.com"}, cf) {
t.Fatal("expected no match") t.Fatal("expected no match")
} }
} }
@@ -105,10 +105,10 @@ func TestMatchesFilterExactWebsite(t *testing.T) {
func TestMatchesFilterWebsiteRegex(t *testing.T) { func TestMatchesFilterWebsiteRegex(t *testing.T) {
re := "gouda.*" re := "gouda.*"
cf := CompileFilter(&pb.Filter{WebsiteRegex: &re}) cf := CompileFilter(&pb.Filter{WebsiteRegex: &re})
if !MatchesFilter(Tuple4{Website: "gouda.example.com"}, cf) { if !MatchesFilter(Tuple6{Website: "gouda.example.com"}, cf) {
t.Fatal("expected match") t.Fatal("expected match")
} }
if MatchesFilter(Tuple4{Website: "edam.example.com"}, cf) { if MatchesFilter(Tuple6{Website: "edam.example.com"}, cf) {
t.Fatal("expected no match") t.Fatal("expected no match")
} }
} }
@@ -116,10 +116,10 @@ func TestMatchesFilterWebsiteRegex(t *testing.T) {
func TestMatchesFilterURIRegex(t *testing.T) { func TestMatchesFilterURIRegex(t *testing.T) {
re := "^/api/.*" re := "^/api/.*"
cf := CompileFilter(&pb.Filter{UriRegex: &re}) cf := CompileFilter(&pb.Filter{UriRegex: &re})
if !MatchesFilter(Tuple4{URI: "/api/users"}, cf) { if !MatchesFilter(Tuple6{URI: "/api/users"}, cf) {
t.Fatal("expected match") t.Fatal("expected match")
} }
if MatchesFilter(Tuple4{URI: "/health"}, cf) { if MatchesFilter(Tuple6{URI: "/health"}, cf) {
t.Fatal("expected no match") t.Fatal("expected no match")
} }
} }
@@ -127,17 +127,17 @@ func TestMatchesFilterURIRegex(t *testing.T) {
func TestMatchesFilterInvalidRegexMatchesNothing(t *testing.T) { func TestMatchesFilterInvalidRegexMatchesNothing(t *testing.T) {
re := "[invalid" re := "[invalid"
cf := CompileFilter(&pb.Filter{WebsiteRegex: &re}) cf := CompileFilter(&pb.Filter{WebsiteRegex: &re})
if MatchesFilter(Tuple4{Website: "anything"}, cf) { if MatchesFilter(Tuple6{Website: "anything"}, cf) {
t.Fatal("invalid regex should match nothing") t.Fatal("invalid regex should match nothing")
} }
} }
func TestMatchesFilterStatusEQ(t *testing.T) { func TestMatchesFilterStatusEQ(t *testing.T) {
cf := compiledEQ(200) cf := compiledEQ(200)
if !MatchesFilter(Tuple4{Status: "200"}, cf) { if !MatchesFilter(Tuple6{Status: "200"}, cf) {
t.Fatal("expected match") t.Fatal("expected match")
} }
if MatchesFilter(Tuple4{Status: "404"}, cf) { if MatchesFilter(Tuple6{Status: "404"}, cf) {
t.Fatal("expected no match") t.Fatal("expected no match")
} }
} }
@@ -145,10 +145,10 @@ func TestMatchesFilterStatusEQ(t *testing.T) {
func TestMatchesFilterStatusNE(t *testing.T) { func TestMatchesFilterStatusNE(t *testing.T) {
v := int32(200) v := int32(200)
cf := CompileFilter(&pb.Filter{HttpResponse: &v, StatusOp: pb.StatusOp_NE}) cf := CompileFilter(&pb.Filter{HttpResponse: &v, StatusOp: pb.StatusOp_NE})
if MatchesFilter(Tuple4{Status: "200"}, cf) { if MatchesFilter(Tuple6{Status: "200"}, cf) {
t.Fatal("expected no match for 200 != 200") t.Fatal("expected no match for 200 != 200")
} }
if !MatchesFilter(Tuple4{Status: "404"}, cf) { if !MatchesFilter(Tuple6{Status: "404"}, cf) {
t.Fatal("expected match for 404 != 200") t.Fatal("expected match for 404 != 200")
} }
} }
@@ -156,13 +156,13 @@ func TestMatchesFilterStatusNE(t *testing.T) {
func TestMatchesFilterStatusGE(t *testing.T) { func TestMatchesFilterStatusGE(t *testing.T) {
v := int32(400) v := int32(400)
cf := CompileFilter(&pb.Filter{HttpResponse: &v, StatusOp: pb.StatusOp_GE}) cf := CompileFilter(&pb.Filter{HttpResponse: &v, StatusOp: pb.StatusOp_GE})
if !MatchesFilter(Tuple4{Status: "400"}, cf) { if !MatchesFilter(Tuple6{Status: "400"}, cf) {
t.Fatal("expected match: 400 >= 400") t.Fatal("expected match: 400 >= 400")
} }
if !MatchesFilter(Tuple4{Status: "500"}, cf) { if !MatchesFilter(Tuple6{Status: "500"}, cf) {
t.Fatal("expected match: 500 >= 400") t.Fatal("expected match: 500 >= 400")
} }
if MatchesFilter(Tuple4{Status: "200"}, cf) { if MatchesFilter(Tuple6{Status: "200"}, cf) {
t.Fatal("expected no match: 200 >= 400") t.Fatal("expected no match: 200 >= 400")
} }
} }
@@ -170,17 +170,17 @@ func TestMatchesFilterStatusGE(t *testing.T) {
func TestMatchesFilterStatusLT(t *testing.T) { func TestMatchesFilterStatusLT(t *testing.T) {
v := int32(400) v := int32(400)
cf := CompileFilter(&pb.Filter{HttpResponse: &v, StatusOp: pb.StatusOp_LT}) cf := CompileFilter(&pb.Filter{HttpResponse: &v, StatusOp: pb.StatusOp_LT})
if !MatchesFilter(Tuple4{Status: "200"}, cf) { if !MatchesFilter(Tuple6{Status: "200"}, cf) {
t.Fatal("expected match: 200 < 400") t.Fatal("expected match: 200 < 400")
} }
if MatchesFilter(Tuple4{Status: "400"}, cf) { if MatchesFilter(Tuple6{Status: "400"}, cf) {
t.Fatal("expected no match: 400 < 400") t.Fatal("expected no match: 400 < 400")
} }
} }
func TestMatchesFilterStatusNonNumeric(t *testing.T) { func TestMatchesFilterStatusNonNumeric(t *testing.T) {
cf := compiledEQ(200) cf := compiledEQ(200)
if MatchesFilter(Tuple4{Status: "ok"}, cf) { if MatchesFilter(Tuple6{Status: "ok"}, cf) {
t.Fatal("non-numeric status should not match") t.Fatal("non-numeric status should not match")
} }
} }
@@ -193,13 +193,145 @@ func TestMatchesFilterCombined(t *testing.T) {
HttpResponse: &v, HttpResponse: &v,
StatusOp: pb.StatusOp_EQ, StatusOp: pb.StatusOp_EQ,
}) })
if !MatchesFilter(Tuple4{Website: "example.com", Status: "200"}, cf) { if !MatchesFilter(Tuple6{Website: "example.com", Status: "200"}, cf) {
t.Fatal("expected match") t.Fatal("expected match")
} }
if MatchesFilter(Tuple4{Website: "other.com", Status: "200"}, cf) { if MatchesFilter(Tuple6{Website: "other.com", Status: "200"}, cf) {
t.Fatal("expected no match: wrong website") t.Fatal("expected no match: wrong website")
} }
if MatchesFilter(Tuple4{Website: "example.com", Status: "404"}, cf) { if MatchesFilter(Tuple6{Website: "example.com", Status: "404"}, cf) {
t.Fatal("expected no match: wrong status") t.Fatal("expected no match: wrong status")
} }
} }
// --- IsTor label encoding and filtering ---
func TestEncodeLabelTupleRoundtripWithTor(t *testing.T) {
for _, isTor := range []bool{false, true} {
orig := Tuple6{Website: "a.com", Prefix: "1.2.3.0/24", URI: "/x", Status: "200", IsTor: isTor}
got := LabelTuple(EncodeTuple(orig))
if got != orig {
t.Errorf("roundtrip mismatch: got %+v, want %+v", got, orig)
}
}
}
func TestLabelTupleBackwardCompat(t *testing.T) {
// Old 4-field label (no is_tor field) should decode with IsTor=false.
label := "a.com\x001.2.3.0/24\x00/x\x00200"
got := LabelTuple(label)
if got.IsTor {
t.Errorf("expected IsTor=false for old label, got true")
}
if got.Website != "a.com" || got.Status != "200" {
t.Errorf("unexpected tuple: %+v", got)
}
}
func TestMatchesFilterTorYes(t *testing.T) {
cf := CompileFilter(&pb.Filter{Tor: pb.TorFilter_TOR_YES})
if !MatchesFilter(Tuple6{IsTor: true}, cf) {
t.Fatal("TOR_YES should match TOR tuple")
}
if MatchesFilter(Tuple6{IsTor: false}, cf) {
t.Fatal("TOR_YES should not match non-TOR tuple")
}
}
func TestMatchesFilterTorNo(t *testing.T) {
cf := CompileFilter(&pb.Filter{Tor: pb.TorFilter_TOR_NO})
if !MatchesFilter(Tuple6{IsTor: false}, cf) {
t.Fatal("TOR_NO should match non-TOR tuple")
}
if MatchesFilter(Tuple6{IsTor: true}, cf) {
t.Fatal("TOR_NO should not match TOR tuple")
}
}
func TestMatchesFilterTorAny(t *testing.T) {
cf := CompileFilter(&pb.Filter{Tor: pb.TorFilter_TOR_ANY})
if !MatchesFilter(Tuple6{IsTor: true}, cf) {
t.Fatal("TOR_ANY should match TOR tuple")
}
if !MatchesFilter(Tuple6{IsTor: false}, cf) {
t.Fatal("TOR_ANY should match non-TOR tuple")
}
}
// --- ASN label encoding, filtering, and DimensionLabel ---
func TestEncodeLabelTupleRoundtripWithASN(t *testing.T) {
for _, asn := range []int32{0, 1, 12345, 65535} {
orig := Tuple6{Website: "a.com", Prefix: "1.2.3.0/24", URI: "/x", Status: "200", ASN: asn}
got := LabelTuple(EncodeTuple(orig))
if got != orig {
t.Errorf("roundtrip mismatch for ASN=%d: got %+v, want %+v", asn, got, orig)
}
}
}
func TestLabelTupleBackwardCompatNoASN(t *testing.T) {
// 5-field label (no asn field) should decode with ASN=0.
label := "a.com\x001.2.3.0/24\x00/x\x00200\x000"
got := LabelTuple(label)
if got.ASN != 0 {
t.Errorf("expected ASN=0 for 5-field label, got %d", got.ASN)
}
}
func TestMatchesFilterAsnEQ(t *testing.T) {
n := int32(12345)
cf := CompileFilter(&pb.Filter{AsnNumber: &n})
if !MatchesFilter(Tuple6{ASN: 12345}, cf) {
t.Fatal("EQ should match equal ASN")
}
if MatchesFilter(Tuple6{ASN: 99999}, cf) {
t.Fatal("EQ should not match different ASN")
}
}
func TestMatchesFilterAsnNE(t *testing.T) {
n := int32(12345)
cf := CompileFilter(&pb.Filter{AsnNumber: &n, AsnOp: pb.StatusOp_NE})
if MatchesFilter(Tuple6{ASN: 12345}, cf) {
t.Fatal("NE should not match equal ASN")
}
if !MatchesFilter(Tuple6{ASN: 99999}, cf) {
t.Fatal("NE should match different ASN")
}
}
func TestMatchesFilterAsnGE(t *testing.T) {
n := int32(1000)
cf := CompileFilter(&pb.Filter{AsnNumber: &n, AsnOp: pb.StatusOp_GE})
if !MatchesFilter(Tuple6{ASN: 1000}, cf) {
t.Fatal("GE should match equal ASN")
}
if !MatchesFilter(Tuple6{ASN: 2000}, cf) {
t.Fatal("GE should match larger ASN")
}
if MatchesFilter(Tuple6{ASN: 500}, cf) {
t.Fatal("GE should not match smaller ASN")
}
}
func TestMatchesFilterAsnLT(t *testing.T) {
n := int32(64512)
cf := CompileFilter(&pb.Filter{AsnNumber: &n, AsnOp: pb.StatusOp_LT})
if !MatchesFilter(Tuple6{ASN: 1000}, cf) {
t.Fatal("LT should match smaller ASN")
}
if MatchesFilter(Tuple6{ASN: 64512}, cf) {
t.Fatal("LT should not match equal ASN")
}
if MatchesFilter(Tuple6{ASN: 65535}, cf) {
t.Fatal("LT should not match larger ASN")
}
}
func TestDimensionLabelASN(t *testing.T) {
got := DimensionLabel(Tuple6{ASN: 12345}, pb.GroupBy_ASN_NUMBER)
if got != "12345" {
t.Errorf("DimensionLabel ASN: got %q, want %q", got, "12345")
}
}

View File

@@ -4,6 +4,14 @@ package logtail;
option go_package = "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb"; option go_package = "git.ipng.ch/ipng/nginx-logtail/proto/logtailpb";
// TorFilter restricts results by whether the client is a TOR exit node.
// TOR_ANY (0) is the default and means no filtering.
enum TorFilter {
TOR_ANY = 0; // no filter
TOR_YES = 1; // only TOR traffic (is_tor=1)
TOR_NO = 2; // only non-TOR traffic (is_tor=0)
}
// StatusOp is the comparison operator applied to http_response in a Filter. // StatusOp is the comparison operator applied to http_response in a Filter.
// Defaults to EQ (exact match) for backward compatibility. // Defaults to EQ (exact match) for backward compatibility.
enum StatusOp { enum StatusOp {
@@ -23,8 +31,13 @@ message Filter {
optional string http_request_uri = 3; optional string http_request_uri = 3;
optional int32 http_response = 4; optional int32 http_response = 4;
StatusOp status_op = 5; // operator for http_response; ignored when unset StatusOp status_op = 5; // operator for http_response; ignored when unset
optional string website_regex = 6; // RE2 regex matched against website optional string website_regex = 6; // RE2 regex matched against website
optional string uri_regex = 7; // RE2 regex matched against http_request_uri optional string uri_regex = 7; // RE2 regex matched against http_request_uri
optional string website_regex_exclude = 11; // RE2 regex; entries matching this are excluded
optional string uri_regex_exclude = 12; // RE2 regex; entries matching this are excluded
TorFilter tor = 8; // restrict to TOR / non-TOR clients
optional int32 asn_number = 9; // filter by client ASN
StatusOp asn_op = 10; // operator for asn_number; ignored when unset
} }
enum GroupBy { enum GroupBy {
@@ -32,6 +45,7 @@ enum GroupBy {
CLIENT_PREFIX = 1; CLIENT_PREFIX = 1;
REQUEST_URI = 2; REQUEST_URI = 2;
HTTP_RESPONSE = 3; HTTP_RESPONSE = 3;
ASN_NUMBER = 4;
} }
enum Window { enum Window {
@@ -87,8 +101,15 @@ message Snapshot {
string source = 1; string source = 1;
int64 timestamp = 2; int64 timestamp = 2;
repeated TopNEntry entries = 3; // top-50K for this 1-minute bucket, sorted desc repeated TopNEntry entries = 3; // top-50K for this 1-minute bucket, sorted desc
bool is_coarse = 4; // true for coarse-ring (5-min) buckets in DumpSnapshots
} }
// DumpSnapshots — returns all ring buffer contents for backfill on aggregator restart.
// Streams fine-ring buckets (is_coarse=false) followed by coarse-ring buckets
// (is_coarse=true), then closes. The lock is held only for the initial copy.
message DumpSnapshotsRequest {}
// ListTargets — returns the targets this node knows about. // ListTargets — returns the targets this node knows about.
// The aggregator returns all configured collectors; a collector returns itself. // The aggregator returns all configured collectors; a collector returns itself.
@@ -108,4 +129,5 @@ service LogtailService {
rpc Trend (TrendRequest) returns (TrendResponse); rpc Trend (TrendRequest) returns (TrendResponse);
rpc StreamSnapshots (SnapshotRequest) returns (stream Snapshot); rpc StreamSnapshots (SnapshotRequest) returns (stream Snapshot);
rpc ListTargets (ListTargetsRequest) returns (ListTargetsResponse); rpc ListTargets (ListTargetsRequest) returns (ListTargetsResponse);
rpc DumpSnapshots (DumpSnapshotsRequest) returns (stream Snapshot);
} }

View File

@@ -2,7 +2,7 @@
// versions: // versions:
// protoc-gen-go v1.36.11 // protoc-gen-go v1.36.11
// protoc v3.21.12 // protoc v3.21.12
// source: logtail.proto // source: proto/logtail.proto
package logtailpb package logtailpb
@@ -21,6 +21,57 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
) )
// TorFilter restricts results by whether the client is a TOR exit node.
// TOR_ANY (0) is the default and means no filtering.
type TorFilter int32
const (
TorFilter_TOR_ANY TorFilter = 0 // no filter
TorFilter_TOR_YES TorFilter = 1 // only TOR traffic (is_tor=1)
TorFilter_TOR_NO TorFilter = 2 // only non-TOR traffic (is_tor=0)
)
// Enum value maps for TorFilter.
var (
TorFilter_name = map[int32]string{
0: "TOR_ANY",
1: "TOR_YES",
2: "TOR_NO",
}
TorFilter_value = map[string]int32{
"TOR_ANY": 0,
"TOR_YES": 1,
"TOR_NO": 2,
}
)
func (x TorFilter) Enum() *TorFilter {
p := new(TorFilter)
*p = x
return p
}
func (x TorFilter) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (TorFilter) Descriptor() protoreflect.EnumDescriptor {
return file_proto_logtail_proto_enumTypes[0].Descriptor()
}
func (TorFilter) Type() protoreflect.EnumType {
return &file_proto_logtail_proto_enumTypes[0]
}
func (x TorFilter) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TorFilter.Descriptor instead.
func (TorFilter) EnumDescriptor() ([]byte, []int) {
return file_proto_logtail_proto_rawDescGZIP(), []int{0}
}
// StatusOp is the comparison operator applied to http_response in a Filter. // StatusOp is the comparison operator applied to http_response in a Filter.
// Defaults to EQ (exact match) for backward compatibility. // Defaults to EQ (exact match) for backward compatibility.
type StatusOp int32 type StatusOp int32
@@ -65,11 +116,11 @@ func (x StatusOp) String() string {
} }
func (StatusOp) Descriptor() protoreflect.EnumDescriptor { func (StatusOp) Descriptor() protoreflect.EnumDescriptor {
return file_logtail_proto_enumTypes[0].Descriptor() return file_proto_logtail_proto_enumTypes[1].Descriptor()
} }
func (StatusOp) Type() protoreflect.EnumType { func (StatusOp) Type() protoreflect.EnumType {
return &file_logtail_proto_enumTypes[0] return &file_proto_logtail_proto_enumTypes[1]
} }
func (x StatusOp) Number() protoreflect.EnumNumber { func (x StatusOp) Number() protoreflect.EnumNumber {
@@ -78,7 +129,7 @@ func (x StatusOp) Number() protoreflect.EnumNumber {
// Deprecated: Use StatusOp.Descriptor instead. // Deprecated: Use StatusOp.Descriptor instead.
func (StatusOp) EnumDescriptor() ([]byte, []int) { func (StatusOp) EnumDescriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{0} return file_proto_logtail_proto_rawDescGZIP(), []int{1}
} }
type GroupBy int32 type GroupBy int32
@@ -88,6 +139,7 @@ const (
GroupBy_CLIENT_PREFIX GroupBy = 1 GroupBy_CLIENT_PREFIX GroupBy = 1
GroupBy_REQUEST_URI GroupBy = 2 GroupBy_REQUEST_URI GroupBy = 2
GroupBy_HTTP_RESPONSE GroupBy = 3 GroupBy_HTTP_RESPONSE GroupBy = 3
GroupBy_ASN_NUMBER GroupBy = 4
) )
// Enum value maps for GroupBy. // Enum value maps for GroupBy.
@@ -97,12 +149,14 @@ var (
1: "CLIENT_PREFIX", 1: "CLIENT_PREFIX",
2: "REQUEST_URI", 2: "REQUEST_URI",
3: "HTTP_RESPONSE", 3: "HTTP_RESPONSE",
4: "ASN_NUMBER",
} }
GroupBy_value = map[string]int32{ GroupBy_value = map[string]int32{
"WEBSITE": 0, "WEBSITE": 0,
"CLIENT_PREFIX": 1, "CLIENT_PREFIX": 1,
"REQUEST_URI": 2, "REQUEST_URI": 2,
"HTTP_RESPONSE": 3, "HTTP_RESPONSE": 3,
"ASN_NUMBER": 4,
} }
) )
@@ -117,11 +171,11 @@ func (x GroupBy) String() string {
} }
func (GroupBy) Descriptor() protoreflect.EnumDescriptor { func (GroupBy) Descriptor() protoreflect.EnumDescriptor {
return file_logtail_proto_enumTypes[1].Descriptor() return file_proto_logtail_proto_enumTypes[2].Descriptor()
} }
func (GroupBy) Type() protoreflect.EnumType { func (GroupBy) Type() protoreflect.EnumType {
return &file_logtail_proto_enumTypes[1] return &file_proto_logtail_proto_enumTypes[2]
} }
func (x GroupBy) Number() protoreflect.EnumNumber { func (x GroupBy) Number() protoreflect.EnumNumber {
@@ -130,7 +184,7 @@ func (x GroupBy) Number() protoreflect.EnumNumber {
// Deprecated: Use GroupBy.Descriptor instead. // Deprecated: Use GroupBy.Descriptor instead.
func (GroupBy) EnumDescriptor() ([]byte, []int) { func (GroupBy) EnumDescriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{1} return file_proto_logtail_proto_rawDescGZIP(), []int{2}
} }
type Window int32 type Window int32
@@ -175,11 +229,11 @@ func (x Window) String() string {
} }
func (Window) Descriptor() protoreflect.EnumDescriptor { func (Window) Descriptor() protoreflect.EnumDescriptor {
return file_logtail_proto_enumTypes[2].Descriptor() return file_proto_logtail_proto_enumTypes[3].Descriptor()
} }
func (Window) Type() protoreflect.EnumType { func (Window) Type() protoreflect.EnumType {
return &file_logtail_proto_enumTypes[2] return &file_proto_logtail_proto_enumTypes[3]
} }
func (x Window) Number() protoreflect.EnumNumber { func (x Window) Number() protoreflect.EnumNumber {
@@ -188,27 +242,32 @@ func (x Window) Number() protoreflect.EnumNumber {
// Deprecated: Use Window.Descriptor instead. // Deprecated: Use Window.Descriptor instead.
func (Window) EnumDescriptor() ([]byte, []int) { func (Window) EnumDescriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{2} return file_proto_logtail_proto_rawDescGZIP(), []int{3}
} }
// Filter restricts results to entries matching all specified fields. // Filter restricts results to entries matching all specified fields.
// Unset fields match everything. Exact-match and regex fields are ANDed. // Unset fields match everything. Exact-match and regex fields are ANDed.
type Filter struct { type Filter struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Website *string `protobuf:"bytes,1,opt,name=website,proto3,oneof" json:"website,omitempty"` Website *string `protobuf:"bytes,1,opt,name=website,proto3,oneof" json:"website,omitempty"`
ClientPrefix *string `protobuf:"bytes,2,opt,name=client_prefix,json=clientPrefix,proto3,oneof" json:"client_prefix,omitempty"` ClientPrefix *string `protobuf:"bytes,2,opt,name=client_prefix,json=clientPrefix,proto3,oneof" json:"client_prefix,omitempty"`
HttpRequestUri *string `protobuf:"bytes,3,opt,name=http_request_uri,json=httpRequestUri,proto3,oneof" json:"http_request_uri,omitempty"` HttpRequestUri *string `protobuf:"bytes,3,opt,name=http_request_uri,json=httpRequestUri,proto3,oneof" json:"http_request_uri,omitempty"`
HttpResponse *int32 `protobuf:"varint,4,opt,name=http_response,json=httpResponse,proto3,oneof" json:"http_response,omitempty"` HttpResponse *int32 `protobuf:"varint,4,opt,name=http_response,json=httpResponse,proto3,oneof" json:"http_response,omitempty"`
StatusOp StatusOp `protobuf:"varint,5,opt,name=status_op,json=statusOp,proto3,enum=logtail.StatusOp" json:"status_op,omitempty"` // operator for http_response; ignored when unset StatusOp StatusOp `protobuf:"varint,5,opt,name=status_op,json=statusOp,proto3,enum=logtail.StatusOp" json:"status_op,omitempty"` // operator for http_response; ignored when unset
WebsiteRegex *string `protobuf:"bytes,6,opt,name=website_regex,json=websiteRegex,proto3,oneof" json:"website_regex,omitempty"` // RE2 regex matched against website WebsiteRegex *string `protobuf:"bytes,6,opt,name=website_regex,json=websiteRegex,proto3,oneof" json:"website_regex,omitempty"` // RE2 regex matched against website
UriRegex *string `protobuf:"bytes,7,opt,name=uri_regex,json=uriRegex,proto3,oneof" json:"uri_regex,omitempty"` // RE2 regex matched against http_request_uri UriRegex *string `protobuf:"bytes,7,opt,name=uri_regex,json=uriRegex,proto3,oneof" json:"uri_regex,omitempty"` // RE2 regex matched against http_request_uri
unknownFields protoimpl.UnknownFields WebsiteRegexExclude *string `protobuf:"bytes,11,opt,name=website_regex_exclude,json=websiteRegexExclude,proto3,oneof" json:"website_regex_exclude,omitempty"` // RE2 regex; entries matching this are excluded
sizeCache protoimpl.SizeCache UriRegexExclude *string `protobuf:"bytes,12,opt,name=uri_regex_exclude,json=uriRegexExclude,proto3,oneof" json:"uri_regex_exclude,omitempty"` // RE2 regex; entries matching this are excluded
Tor TorFilter `protobuf:"varint,8,opt,name=tor,proto3,enum=logtail.TorFilter" json:"tor,omitempty"` // restrict to TOR / non-TOR clients
AsnNumber *int32 `protobuf:"varint,9,opt,name=asn_number,json=asnNumber,proto3,oneof" json:"asn_number,omitempty"` // filter by client ASN
AsnOp StatusOp `protobuf:"varint,10,opt,name=asn_op,json=asnOp,proto3,enum=logtail.StatusOp" json:"asn_op,omitempty"` // operator for asn_number; ignored when unset
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
} }
func (x *Filter) Reset() { func (x *Filter) Reset() {
*x = Filter{} *x = Filter{}
mi := &file_logtail_proto_msgTypes[0] mi := &file_proto_logtail_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -220,7 +279,7 @@ func (x *Filter) String() string {
func (*Filter) ProtoMessage() {} func (*Filter) ProtoMessage() {}
func (x *Filter) ProtoReflect() protoreflect.Message { func (x *Filter) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[0] mi := &file_proto_logtail_proto_msgTypes[0]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -233,7 +292,7 @@ func (x *Filter) ProtoReflect() protoreflect.Message {
// Deprecated: Use Filter.ProtoReflect.Descriptor instead. // Deprecated: Use Filter.ProtoReflect.Descriptor instead.
func (*Filter) Descriptor() ([]byte, []int) { func (*Filter) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{0} return file_proto_logtail_proto_rawDescGZIP(), []int{0}
} }
func (x *Filter) GetWebsite() string { func (x *Filter) GetWebsite() string {
@@ -285,6 +344,41 @@ func (x *Filter) GetUriRegex() string {
return "" return ""
} }
func (x *Filter) GetWebsiteRegexExclude() string {
if x != nil && x.WebsiteRegexExclude != nil {
return *x.WebsiteRegexExclude
}
return ""
}
func (x *Filter) GetUriRegexExclude() string {
if x != nil && x.UriRegexExclude != nil {
return *x.UriRegexExclude
}
return ""
}
func (x *Filter) GetTor() TorFilter {
if x != nil {
return x.Tor
}
return TorFilter_TOR_ANY
}
func (x *Filter) GetAsnNumber() int32 {
if x != nil && x.AsnNumber != nil {
return *x.AsnNumber
}
return 0
}
func (x *Filter) GetAsnOp() StatusOp {
if x != nil {
return x.AsnOp
}
return StatusOp_EQ
}
type TopNRequest struct { type TopNRequest struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Filter *Filter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` Filter *Filter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
@@ -297,7 +391,7 @@ type TopNRequest struct {
func (x *TopNRequest) Reset() { func (x *TopNRequest) Reset() {
*x = TopNRequest{} *x = TopNRequest{}
mi := &file_logtail_proto_msgTypes[1] mi := &file_proto_logtail_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -309,7 +403,7 @@ func (x *TopNRequest) String() string {
func (*TopNRequest) ProtoMessage() {} func (*TopNRequest) ProtoMessage() {}
func (x *TopNRequest) ProtoReflect() protoreflect.Message { func (x *TopNRequest) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[1] mi := &file_proto_logtail_proto_msgTypes[1]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -322,7 +416,7 @@ func (x *TopNRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use TopNRequest.ProtoReflect.Descriptor instead. // Deprecated: Use TopNRequest.ProtoReflect.Descriptor instead.
func (*TopNRequest) Descriptor() ([]byte, []int) { func (*TopNRequest) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{1} return file_proto_logtail_proto_rawDescGZIP(), []int{1}
} }
func (x *TopNRequest) GetFilter() *Filter { func (x *TopNRequest) GetFilter() *Filter {
@@ -363,7 +457,7 @@ type TopNEntry struct {
func (x *TopNEntry) Reset() { func (x *TopNEntry) Reset() {
*x = TopNEntry{} *x = TopNEntry{}
mi := &file_logtail_proto_msgTypes[2] mi := &file_proto_logtail_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -375,7 +469,7 @@ func (x *TopNEntry) String() string {
func (*TopNEntry) ProtoMessage() {} func (*TopNEntry) ProtoMessage() {}
func (x *TopNEntry) ProtoReflect() protoreflect.Message { func (x *TopNEntry) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[2] mi := &file_proto_logtail_proto_msgTypes[2]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -388,7 +482,7 @@ func (x *TopNEntry) ProtoReflect() protoreflect.Message {
// Deprecated: Use TopNEntry.ProtoReflect.Descriptor instead. // Deprecated: Use TopNEntry.ProtoReflect.Descriptor instead.
func (*TopNEntry) Descriptor() ([]byte, []int) { func (*TopNEntry) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{2} return file_proto_logtail_proto_rawDescGZIP(), []int{2}
} }
func (x *TopNEntry) GetLabel() string { func (x *TopNEntry) GetLabel() string {
@@ -415,7 +509,7 @@ type TopNResponse struct {
func (x *TopNResponse) Reset() { func (x *TopNResponse) Reset() {
*x = TopNResponse{} *x = TopNResponse{}
mi := &file_logtail_proto_msgTypes[3] mi := &file_proto_logtail_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -427,7 +521,7 @@ func (x *TopNResponse) String() string {
func (*TopNResponse) ProtoMessage() {} func (*TopNResponse) ProtoMessage() {}
func (x *TopNResponse) ProtoReflect() protoreflect.Message { func (x *TopNResponse) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[3] mi := &file_proto_logtail_proto_msgTypes[3]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -440,7 +534,7 @@ func (x *TopNResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use TopNResponse.ProtoReflect.Descriptor instead. // Deprecated: Use TopNResponse.ProtoReflect.Descriptor instead.
func (*TopNResponse) Descriptor() ([]byte, []int) { func (*TopNResponse) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{3} return file_proto_logtail_proto_rawDescGZIP(), []int{3}
} }
func (x *TopNResponse) GetEntries() []*TopNEntry { func (x *TopNResponse) GetEntries() []*TopNEntry {
@@ -467,7 +561,7 @@ type TrendRequest struct {
func (x *TrendRequest) Reset() { func (x *TrendRequest) Reset() {
*x = TrendRequest{} *x = TrendRequest{}
mi := &file_logtail_proto_msgTypes[4] mi := &file_proto_logtail_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -479,7 +573,7 @@ func (x *TrendRequest) String() string {
func (*TrendRequest) ProtoMessage() {} func (*TrendRequest) ProtoMessage() {}
func (x *TrendRequest) ProtoReflect() protoreflect.Message { func (x *TrendRequest) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[4] mi := &file_proto_logtail_proto_msgTypes[4]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -492,7 +586,7 @@ func (x *TrendRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use TrendRequest.ProtoReflect.Descriptor instead. // Deprecated: Use TrendRequest.ProtoReflect.Descriptor instead.
func (*TrendRequest) Descriptor() ([]byte, []int) { func (*TrendRequest) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{4} return file_proto_logtail_proto_rawDescGZIP(), []int{4}
} }
func (x *TrendRequest) GetFilter() *Filter { func (x *TrendRequest) GetFilter() *Filter {
@@ -519,7 +613,7 @@ type TrendPoint struct {
func (x *TrendPoint) Reset() { func (x *TrendPoint) Reset() {
*x = TrendPoint{} *x = TrendPoint{}
mi := &file_logtail_proto_msgTypes[5] mi := &file_proto_logtail_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -531,7 +625,7 @@ func (x *TrendPoint) String() string {
func (*TrendPoint) ProtoMessage() {} func (*TrendPoint) ProtoMessage() {}
func (x *TrendPoint) ProtoReflect() protoreflect.Message { func (x *TrendPoint) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[5] mi := &file_proto_logtail_proto_msgTypes[5]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -544,7 +638,7 @@ func (x *TrendPoint) ProtoReflect() protoreflect.Message {
// Deprecated: Use TrendPoint.ProtoReflect.Descriptor instead. // Deprecated: Use TrendPoint.ProtoReflect.Descriptor instead.
func (*TrendPoint) Descriptor() ([]byte, []int) { func (*TrendPoint) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{5} return file_proto_logtail_proto_rawDescGZIP(), []int{5}
} }
func (x *TrendPoint) GetTimestampUnix() int64 { func (x *TrendPoint) GetTimestampUnix() int64 {
@@ -571,7 +665,7 @@ type TrendResponse struct {
func (x *TrendResponse) Reset() { func (x *TrendResponse) Reset() {
*x = TrendResponse{} *x = TrendResponse{}
mi := &file_logtail_proto_msgTypes[6] mi := &file_proto_logtail_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -583,7 +677,7 @@ func (x *TrendResponse) String() string {
func (*TrendResponse) ProtoMessage() {} func (*TrendResponse) ProtoMessage() {}
func (x *TrendResponse) ProtoReflect() protoreflect.Message { func (x *TrendResponse) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[6] mi := &file_proto_logtail_proto_msgTypes[6]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -596,7 +690,7 @@ func (x *TrendResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use TrendResponse.ProtoReflect.Descriptor instead. // Deprecated: Use TrendResponse.ProtoReflect.Descriptor instead.
func (*TrendResponse) Descriptor() ([]byte, []int) { func (*TrendResponse) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{6} return file_proto_logtail_proto_rawDescGZIP(), []int{6}
} }
func (x *TrendResponse) GetPoints() []*TrendPoint { func (x *TrendResponse) GetPoints() []*TrendPoint {
@@ -621,7 +715,7 @@ type SnapshotRequest struct {
func (x *SnapshotRequest) Reset() { func (x *SnapshotRequest) Reset() {
*x = SnapshotRequest{} *x = SnapshotRequest{}
mi := &file_logtail_proto_msgTypes[7] mi := &file_proto_logtail_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -633,7 +727,7 @@ func (x *SnapshotRequest) String() string {
func (*SnapshotRequest) ProtoMessage() {} func (*SnapshotRequest) ProtoMessage() {}
func (x *SnapshotRequest) ProtoReflect() protoreflect.Message { func (x *SnapshotRequest) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[7] mi := &file_proto_logtail_proto_msgTypes[7]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -646,21 +740,22 @@ func (x *SnapshotRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SnapshotRequest.ProtoReflect.Descriptor instead. // Deprecated: Use SnapshotRequest.ProtoReflect.Descriptor instead.
func (*SnapshotRequest) Descriptor() ([]byte, []int) { func (*SnapshotRequest) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{7} return file_proto_logtail_proto_rawDescGZIP(), []int{7}
} }
type Snapshot struct { type Snapshot struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Entries []*TopNEntry `protobuf:"bytes,3,rep,name=entries,proto3" json:"entries,omitempty"` // top-50K for this 1-minute bucket, sorted desc Entries []*TopNEntry `protobuf:"bytes,3,rep,name=entries,proto3" json:"entries,omitempty"` // top-50K for this 1-minute bucket, sorted desc
IsCoarse bool `protobuf:"varint,4,opt,name=is_coarse,json=isCoarse,proto3" json:"is_coarse,omitempty"` // true for coarse-ring (5-min) buckets in DumpSnapshots
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache sizeCache protoimpl.SizeCache
} }
func (x *Snapshot) Reset() { func (x *Snapshot) Reset() {
*x = Snapshot{} *x = Snapshot{}
mi := &file_logtail_proto_msgTypes[8] mi := &file_proto_logtail_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -672,7 +767,7 @@ func (x *Snapshot) String() string {
func (*Snapshot) ProtoMessage() {} func (*Snapshot) ProtoMessage() {}
func (x *Snapshot) ProtoReflect() protoreflect.Message { func (x *Snapshot) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[8] mi := &file_proto_logtail_proto_msgTypes[8]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -685,7 +780,7 @@ func (x *Snapshot) ProtoReflect() protoreflect.Message {
// Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. // Deprecated: Use Snapshot.ProtoReflect.Descriptor instead.
func (*Snapshot) Descriptor() ([]byte, []int) { func (*Snapshot) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{8} return file_proto_logtail_proto_rawDescGZIP(), []int{8}
} }
func (x *Snapshot) GetSource() string { func (x *Snapshot) GetSource() string {
@@ -709,6 +804,49 @@ func (x *Snapshot) GetEntries() []*TopNEntry {
return nil return nil
} }
func (x *Snapshot) GetIsCoarse() bool {
if x != nil {
return x.IsCoarse
}
return false
}
type DumpSnapshotsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *DumpSnapshotsRequest) Reset() {
*x = DumpSnapshotsRequest{}
mi := &file_proto_logtail_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DumpSnapshotsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DumpSnapshotsRequest) ProtoMessage() {}
func (x *DumpSnapshotsRequest) ProtoReflect() protoreflect.Message {
mi := &file_proto_logtail_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DumpSnapshotsRequest.ProtoReflect.Descriptor instead.
func (*DumpSnapshotsRequest) Descriptor() ([]byte, []int) {
return file_proto_logtail_proto_rawDescGZIP(), []int{9}
}
type ListTargetsRequest struct { type ListTargetsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"` state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields unknownFields protoimpl.UnknownFields
@@ -717,7 +855,7 @@ type ListTargetsRequest struct {
func (x *ListTargetsRequest) Reset() { func (x *ListTargetsRequest) Reset() {
*x = ListTargetsRequest{} *x = ListTargetsRequest{}
mi := &file_logtail_proto_msgTypes[9] mi := &file_proto_logtail_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -729,7 +867,7 @@ func (x *ListTargetsRequest) String() string {
func (*ListTargetsRequest) ProtoMessage() {} func (*ListTargetsRequest) ProtoMessage() {}
func (x *ListTargetsRequest) ProtoReflect() protoreflect.Message { func (x *ListTargetsRequest) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[9] mi := &file_proto_logtail_proto_msgTypes[10]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -742,7 +880,7 @@ func (x *ListTargetsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTargetsRequest.ProtoReflect.Descriptor instead. // Deprecated: Use ListTargetsRequest.ProtoReflect.Descriptor instead.
func (*ListTargetsRequest) Descriptor() ([]byte, []int) { func (*ListTargetsRequest) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{9} return file_proto_logtail_proto_rawDescGZIP(), []int{10}
} }
type TargetInfo struct { type TargetInfo struct {
@@ -755,7 +893,7 @@ type TargetInfo struct {
func (x *TargetInfo) Reset() { func (x *TargetInfo) Reset() {
*x = TargetInfo{} *x = TargetInfo{}
mi := &file_logtail_proto_msgTypes[10] mi := &file_proto_logtail_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -767,7 +905,7 @@ func (x *TargetInfo) String() string {
func (*TargetInfo) ProtoMessage() {} func (*TargetInfo) ProtoMessage() {}
func (x *TargetInfo) ProtoReflect() protoreflect.Message { func (x *TargetInfo) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[10] mi := &file_proto_logtail_proto_msgTypes[11]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -780,7 +918,7 @@ func (x *TargetInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use TargetInfo.ProtoReflect.Descriptor instead. // Deprecated: Use TargetInfo.ProtoReflect.Descriptor instead.
func (*TargetInfo) Descriptor() ([]byte, []int) { func (*TargetInfo) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{10} return file_proto_logtail_proto_rawDescGZIP(), []int{11}
} }
func (x *TargetInfo) GetName() string { func (x *TargetInfo) GetName() string {
@@ -806,7 +944,7 @@ type ListTargetsResponse struct {
func (x *ListTargetsResponse) Reset() { func (x *ListTargetsResponse) Reset() {
*x = ListTargetsResponse{} *x = ListTargetsResponse{}
mi := &file_logtail_proto_msgTypes[11] mi := &file_proto_logtail_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi) ms.StoreMessageInfo(mi)
} }
@@ -818,7 +956,7 @@ func (x *ListTargetsResponse) String() string {
func (*ListTargetsResponse) ProtoMessage() {} func (*ListTargetsResponse) ProtoMessage() {}
func (x *ListTargetsResponse) ProtoReflect() protoreflect.Message { func (x *ListTargetsResponse) ProtoReflect() protoreflect.Message {
mi := &file_logtail_proto_msgTypes[11] mi := &file_proto_logtail_proto_msgTypes[12]
if x != nil { if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil { if ms.LoadMessageInfo() == nil {
@@ -831,7 +969,7 @@ func (x *ListTargetsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListTargetsResponse.ProtoReflect.Descriptor instead. // Deprecated: Use ListTargetsResponse.ProtoReflect.Descriptor instead.
func (*ListTargetsResponse) Descriptor() ([]byte, []int) { func (*ListTargetsResponse) Descriptor() ([]byte, []int) {
return file_logtail_proto_rawDescGZIP(), []int{11} return file_proto_logtail_proto_rawDescGZIP(), []int{12}
} }
func (x *ListTargetsResponse) GetTargets() []*TargetInfo { func (x *ListTargetsResponse) GetTargets() []*TargetInfo {
@@ -841,11 +979,11 @@ func (x *ListTargetsResponse) GetTargets() []*TargetInfo {
return nil return nil
} }
var File_logtail_proto protoreflect.FileDescriptor var File_proto_logtail_proto protoreflect.FileDescriptor
const file_logtail_proto_rawDesc = "" + const file_proto_logtail_proto_rawDesc = "" +
"\n" + "\n" +
"\rlogtail.proto\x12\alogtail\"\x8b\x03\n" + "\x13proto/logtail.proto\x12\alogtail\"\xa8\x05\n" +
"\x06Filter\x12\x1d\n" + "\x06Filter\x12\x1d\n" +
"\awebsite\x18\x01 \x01(\tH\x00R\awebsite\x88\x01\x01\x12(\n" + "\awebsite\x18\x01 \x01(\tH\x00R\awebsite\x88\x01\x01\x12(\n" +
"\rclient_prefix\x18\x02 \x01(\tH\x01R\fclientPrefix\x88\x01\x01\x12-\n" + "\rclient_prefix\x18\x02 \x01(\tH\x01R\fclientPrefix\x88\x01\x01\x12-\n" +
@@ -853,7 +991,14 @@ const file_logtail_proto_rawDesc = "" +
"\rhttp_response\x18\x04 \x01(\x05H\x03R\fhttpResponse\x88\x01\x01\x12.\n" + "\rhttp_response\x18\x04 \x01(\x05H\x03R\fhttpResponse\x88\x01\x01\x12.\n" +
"\tstatus_op\x18\x05 \x01(\x0e2\x11.logtail.StatusOpR\bstatusOp\x12(\n" + "\tstatus_op\x18\x05 \x01(\x0e2\x11.logtail.StatusOpR\bstatusOp\x12(\n" +
"\rwebsite_regex\x18\x06 \x01(\tH\x04R\fwebsiteRegex\x88\x01\x01\x12 \n" + "\rwebsite_regex\x18\x06 \x01(\tH\x04R\fwebsiteRegex\x88\x01\x01\x12 \n" +
"\turi_regex\x18\a \x01(\tH\x05R\buriRegex\x88\x01\x01B\n" + "\turi_regex\x18\a \x01(\tH\x05R\buriRegex\x88\x01\x01\x127\n" +
"\x15website_regex_exclude\x18\v \x01(\tH\x06R\x13websiteRegexExclude\x88\x01\x01\x12/\n" +
"\x11uri_regex_exclude\x18\f \x01(\tH\aR\x0furiRegexExclude\x88\x01\x01\x12$\n" +
"\x03tor\x18\b \x01(\x0e2\x12.logtail.TorFilterR\x03tor\x12\"\n" +
"\n" +
"asn_number\x18\t \x01(\x05H\bR\tasnNumber\x88\x01\x01\x12(\n" +
"\x06asn_op\x18\n" +
" \x01(\x0e2\x11.logtail.StatusOpR\x05asnOpB\n" +
"\n" + "\n" +
"\b_websiteB\x10\n" + "\b_websiteB\x10\n" +
"\x0e_client_prefixB\x13\n" + "\x0e_client_prefixB\x13\n" +
@@ -861,7 +1006,10 @@ const file_logtail_proto_rawDesc = "" +
"\x0e_http_responseB\x10\n" + "\x0e_http_responseB\x10\n" +
"\x0e_website_regexB\f\n" + "\x0e_website_regexB\f\n" +
"\n" + "\n" +
"_uri_regex\"\x9a\x01\n" + "_uri_regexB\x18\n" +
"\x16_website_regex_excludeB\x14\n" +
"\x12_uri_regex_excludeB\r\n" +
"\v_asn_number\"\x9a\x01\n" +
"\vTopNRequest\x12'\n" + "\vTopNRequest\x12'\n" +
"\x06filter\x18\x01 \x01(\v2\x0f.logtail.FilterR\x06filter\x12+\n" + "\x06filter\x18\x01 \x01(\v2\x0f.logtail.FilterR\x06filter\x12+\n" +
"\bgroup_by\x18\x02 \x01(\x0e2\x10.logtail.GroupByR\agroupBy\x12\f\n" + "\bgroup_by\x18\x02 \x01(\x0e2\x10.logtail.GroupByR\agroupBy\x12\f\n" +
@@ -883,122 +1031,138 @@ const file_logtail_proto_rawDesc = "" +
"\rTrendResponse\x12+\n" + "\rTrendResponse\x12+\n" +
"\x06points\x18\x01 \x03(\v2\x13.logtail.TrendPointR\x06points\x12\x16\n" + "\x06points\x18\x01 \x03(\v2\x13.logtail.TrendPointR\x06points\x12\x16\n" +
"\x06source\x18\x02 \x01(\tR\x06source\"\x11\n" + "\x06source\x18\x02 \x01(\tR\x06source\"\x11\n" +
"\x0fSnapshotRequest\"n\n" + "\x0fSnapshotRequest\"\x8b\x01\n" +
"\bSnapshot\x12\x16\n" + "\bSnapshot\x12\x16\n" +
"\x06source\x18\x01 \x01(\tR\x06source\x12\x1c\n" + "\x06source\x18\x01 \x01(\tR\x06source\x12\x1c\n" +
"\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12,\n" + "\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12,\n" +
"\aentries\x18\x03 \x03(\v2\x12.logtail.TopNEntryR\aentries\"\x14\n" + "\aentries\x18\x03 \x03(\v2\x12.logtail.TopNEntryR\aentries\x12\x1b\n" +
"\tis_coarse\x18\x04 \x01(\bR\bisCoarse\"\x16\n" +
"\x14DumpSnapshotsRequest\"\x14\n" +
"\x12ListTargetsRequest\"4\n" + "\x12ListTargetsRequest\"4\n" +
"\n" + "\n" +
"TargetInfo\x12\x12\n" + "TargetInfo\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" +
"\x04addr\x18\x02 \x01(\tR\x04addr\"D\n" + "\x04addr\x18\x02 \x01(\tR\x04addr\"D\n" +
"\x13ListTargetsResponse\x12-\n" + "\x13ListTargetsResponse\x12-\n" +
"\atargets\x18\x01 \x03(\v2\x13.logtail.TargetInfoR\atargets*:\n" + "\atargets\x18\x01 \x03(\v2\x13.logtail.TargetInfoR\atargets*1\n" +
"\tTorFilter\x12\v\n" +
"\aTOR_ANY\x10\x00\x12\v\n" +
"\aTOR_YES\x10\x01\x12\n" +
"\n" +
"\x06TOR_NO\x10\x02*:\n" +
"\bStatusOp\x12\x06\n" + "\bStatusOp\x12\x06\n" +
"\x02EQ\x10\x00\x12\x06\n" + "\x02EQ\x10\x00\x12\x06\n" +
"\x02NE\x10\x01\x12\x06\n" + "\x02NE\x10\x01\x12\x06\n" +
"\x02GT\x10\x02\x12\x06\n" + "\x02GT\x10\x02\x12\x06\n" +
"\x02GE\x10\x03\x12\x06\n" + "\x02GE\x10\x03\x12\x06\n" +
"\x02LT\x10\x04\x12\x06\n" + "\x02LT\x10\x04\x12\x06\n" +
"\x02LE\x10\x05*M\n" + "\x02LE\x10\x05*]\n" +
"\aGroupBy\x12\v\n" + "\aGroupBy\x12\v\n" +
"\aWEBSITE\x10\x00\x12\x11\n" + "\aWEBSITE\x10\x00\x12\x11\n" +
"\rCLIENT_PREFIX\x10\x01\x12\x0f\n" + "\rCLIENT_PREFIX\x10\x01\x12\x0f\n" +
"\vREQUEST_URI\x10\x02\x12\x11\n" + "\vREQUEST_URI\x10\x02\x12\x11\n" +
"\rHTTP_RESPONSE\x10\x03*A\n" + "\rHTTP_RESPONSE\x10\x03\x12\x0e\n" +
"\n" +
"ASN_NUMBER\x10\x04*A\n" +
"\x06Window\x12\a\n" + "\x06Window\x12\a\n" +
"\x03W1M\x10\x00\x12\a\n" + "\x03W1M\x10\x00\x12\a\n" +
"\x03W5M\x10\x01\x12\b\n" + "\x03W5M\x10\x01\x12\b\n" +
"\x04W15M\x10\x02\x12\b\n" + "\x04W15M\x10\x02\x12\b\n" +
"\x04W60M\x10\x03\x12\a\n" + "\x04W60M\x10\x03\x12\a\n" +
"\x03W6H\x10\x04\x12\b\n" + "\x03W6H\x10\x04\x12\b\n" +
"\x04W24H\x10\x052\x89\x02\n" + "\x04W24H\x10\x052\xce\x02\n" +
"\x0eLogtailService\x123\n" + "\x0eLogtailService\x123\n" +
"\x04TopN\x12\x14.logtail.TopNRequest\x1a\x15.logtail.TopNResponse\x126\n" + "\x04TopN\x12\x14.logtail.TopNRequest\x1a\x15.logtail.TopNResponse\x126\n" +
"\x05Trend\x12\x15.logtail.TrendRequest\x1a\x16.logtail.TrendResponse\x12@\n" + "\x05Trend\x12\x15.logtail.TrendRequest\x1a\x16.logtail.TrendResponse\x12@\n" +
"\x0fStreamSnapshots\x12\x18.logtail.SnapshotRequest\x1a\x11.logtail.Snapshot0\x01\x12H\n" + "\x0fStreamSnapshots\x12\x18.logtail.SnapshotRequest\x1a\x11.logtail.Snapshot0\x01\x12H\n" +
"\vListTargets\x12\x1b.logtail.ListTargetsRequest\x1a\x1c.logtail.ListTargetsResponseB0Z.git.ipng.ch/ipng/nginx-logtail/proto/logtailpbb\x06proto3" "\vListTargets\x12\x1b.logtail.ListTargetsRequest\x1a\x1c.logtail.ListTargetsResponse\x12C\n" +
"\rDumpSnapshots\x12\x1d.logtail.DumpSnapshotsRequest\x1a\x11.logtail.Snapshot0\x01B0Z.git.ipng.ch/ipng/nginx-logtail/proto/logtailpbb\x06proto3"
var ( var (
file_logtail_proto_rawDescOnce sync.Once file_proto_logtail_proto_rawDescOnce sync.Once
file_logtail_proto_rawDescData []byte file_proto_logtail_proto_rawDescData []byte
) )
func file_logtail_proto_rawDescGZIP() []byte { func file_proto_logtail_proto_rawDescGZIP() []byte {
file_logtail_proto_rawDescOnce.Do(func() { file_proto_logtail_proto_rawDescOnce.Do(func() {
file_logtail_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_logtail_proto_rawDesc), len(file_logtail_proto_rawDesc))) file_proto_logtail_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_logtail_proto_rawDesc), len(file_proto_logtail_proto_rawDesc)))
}) })
return file_logtail_proto_rawDescData return file_proto_logtail_proto_rawDescData
} }
var file_logtail_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_proto_logtail_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
var file_logtail_proto_msgTypes = make([]protoimpl.MessageInfo, 12) var file_proto_logtail_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
var file_logtail_proto_goTypes = []any{ var file_proto_logtail_proto_goTypes = []any{
(StatusOp)(0), // 0: logtail.StatusOp (TorFilter)(0), // 0: logtail.TorFilter
(GroupBy)(0), // 1: logtail.GroupBy (StatusOp)(0), // 1: logtail.StatusOp
(Window)(0), // 2: logtail.Window (GroupBy)(0), // 2: logtail.GroupBy
(*Filter)(nil), // 3: logtail.Filter (Window)(0), // 3: logtail.Window
(*TopNRequest)(nil), // 4: logtail.TopNRequest (*Filter)(nil), // 4: logtail.Filter
(*TopNEntry)(nil), // 5: logtail.TopNEntry (*TopNRequest)(nil), // 5: logtail.TopNRequest
(*TopNResponse)(nil), // 6: logtail.TopNResponse (*TopNEntry)(nil), // 6: logtail.TopNEntry
(*TrendRequest)(nil), // 7: logtail.TrendRequest (*TopNResponse)(nil), // 7: logtail.TopNResponse
(*TrendPoint)(nil), // 8: logtail.TrendPoint (*TrendRequest)(nil), // 8: logtail.TrendRequest
(*TrendResponse)(nil), // 9: logtail.TrendResponse (*TrendPoint)(nil), // 9: logtail.TrendPoint
(*SnapshotRequest)(nil), // 10: logtail.SnapshotRequest (*TrendResponse)(nil), // 10: logtail.TrendResponse
(*Snapshot)(nil), // 11: logtail.Snapshot (*SnapshotRequest)(nil), // 11: logtail.SnapshotRequest
(*ListTargetsRequest)(nil), // 12: logtail.ListTargetsRequest (*Snapshot)(nil), // 12: logtail.Snapshot
(*TargetInfo)(nil), // 13: logtail.TargetInfo (*DumpSnapshotsRequest)(nil), // 13: logtail.DumpSnapshotsRequest
(*ListTargetsResponse)(nil), // 14: logtail.ListTargetsResponse (*ListTargetsRequest)(nil), // 14: logtail.ListTargetsRequest
(*TargetInfo)(nil), // 15: logtail.TargetInfo
(*ListTargetsResponse)(nil), // 16: logtail.ListTargetsResponse
} }
var file_logtail_proto_depIdxs = []int32{ var file_proto_logtail_proto_depIdxs = []int32{
0, // 0: logtail.Filter.status_op:type_name -> logtail.StatusOp 1, // 0: logtail.Filter.status_op:type_name -> logtail.StatusOp
3, // 1: logtail.TopNRequest.filter:type_name -> logtail.Filter 0, // 1: logtail.Filter.tor:type_name -> logtail.TorFilter
1, // 2: logtail.TopNRequest.group_by:type_name -> logtail.GroupBy 1, // 2: logtail.Filter.asn_op:type_name -> logtail.StatusOp
2, // 3: logtail.TopNRequest.window:type_name -> logtail.Window 4, // 3: logtail.TopNRequest.filter:type_name -> logtail.Filter
5, // 4: logtail.TopNResponse.entries:type_name -> logtail.TopNEntry 2, // 4: logtail.TopNRequest.group_by:type_name -> logtail.GroupBy
3, // 5: logtail.TrendRequest.filter:type_name -> logtail.Filter 3, // 5: logtail.TopNRequest.window:type_name -> logtail.Window
2, // 6: logtail.TrendRequest.window:type_name -> logtail.Window 6, // 6: logtail.TopNResponse.entries:type_name -> logtail.TopNEntry
8, // 7: logtail.TrendResponse.points:type_name -> logtail.TrendPoint 4, // 7: logtail.TrendRequest.filter:type_name -> logtail.Filter
5, // 8: logtail.Snapshot.entries:type_name -> logtail.TopNEntry 3, // 8: logtail.TrendRequest.window:type_name -> logtail.Window
13, // 9: logtail.ListTargetsResponse.targets:type_name -> logtail.TargetInfo 9, // 9: logtail.TrendResponse.points:type_name -> logtail.TrendPoint
4, // 10: logtail.LogtailService.TopN:input_type -> logtail.TopNRequest 6, // 10: logtail.Snapshot.entries:type_name -> logtail.TopNEntry
7, // 11: logtail.LogtailService.Trend:input_type -> logtail.TrendRequest 15, // 11: logtail.ListTargetsResponse.targets:type_name -> logtail.TargetInfo
10, // 12: logtail.LogtailService.StreamSnapshots:input_type -> logtail.SnapshotRequest 5, // 12: logtail.LogtailService.TopN:input_type -> logtail.TopNRequest
12, // 13: logtail.LogtailService.ListTargets:input_type -> logtail.ListTargetsRequest 8, // 13: logtail.LogtailService.Trend:input_type -> logtail.TrendRequest
6, // 14: logtail.LogtailService.TopN:output_type -> logtail.TopNResponse 11, // 14: logtail.LogtailService.StreamSnapshots:input_type -> logtail.SnapshotRequest
9, // 15: logtail.LogtailService.Trend:output_type -> logtail.TrendResponse 14, // 15: logtail.LogtailService.ListTargets:input_type -> logtail.ListTargetsRequest
11, // 16: logtail.LogtailService.StreamSnapshots:output_type -> logtail.Snapshot 13, // 16: logtail.LogtailService.DumpSnapshots:input_type -> logtail.DumpSnapshotsRequest
14, // 17: logtail.LogtailService.ListTargets:output_type -> logtail.ListTargetsResponse 7, // 17: logtail.LogtailService.TopN:output_type -> logtail.TopNResponse
14, // [14:18] is the sub-list for method output_type 10, // 18: logtail.LogtailService.Trend:output_type -> logtail.TrendResponse
10, // [10:14] is the sub-list for method input_type 12, // 19: logtail.LogtailService.StreamSnapshots:output_type -> logtail.Snapshot
10, // [10:10] is the sub-list for extension type_name 16, // 20: logtail.LogtailService.ListTargets:output_type -> logtail.ListTargetsResponse
10, // [10:10] is the sub-list for extension extendee 12, // 21: logtail.LogtailService.DumpSnapshots:output_type -> logtail.Snapshot
0, // [0:10] is the sub-list for field type_name 17, // [17:22] is the sub-list for method output_type
12, // [12:17] is the sub-list for method input_type
12, // [12:12] is the sub-list for extension type_name
12, // [12:12] is the sub-list for extension extendee
0, // [0:12] is the sub-list for field type_name
} }
func init() { file_logtail_proto_init() } func init() { file_proto_logtail_proto_init() }
func file_logtail_proto_init() { func file_proto_logtail_proto_init() {
if File_logtail_proto != nil { if File_proto_logtail_proto != nil {
return return
} }
file_logtail_proto_msgTypes[0].OneofWrappers = []any{} file_proto_logtail_proto_msgTypes[0].OneofWrappers = []any{}
type x struct{} type x struct{}
out := protoimpl.TypeBuilder{ out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{ File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_logtail_proto_rawDesc), len(file_logtail_proto_rawDesc)), RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_logtail_proto_rawDesc), len(file_proto_logtail_proto_rawDesc)),
NumEnums: 3, NumEnums: 4,
NumMessages: 12, NumMessages: 13,
NumExtensions: 0, NumExtensions: 0,
NumServices: 1, NumServices: 1,
}, },
GoTypes: file_logtail_proto_goTypes, GoTypes: file_proto_logtail_proto_goTypes,
DependencyIndexes: file_logtail_proto_depIdxs, DependencyIndexes: file_proto_logtail_proto_depIdxs,
EnumInfos: file_logtail_proto_enumTypes, EnumInfos: file_proto_logtail_proto_enumTypes,
MessageInfos: file_logtail_proto_msgTypes, MessageInfos: file_proto_logtail_proto_msgTypes,
}.Build() }.Build()
File_logtail_proto = out.File File_proto_logtail_proto = out.File
file_logtail_proto_goTypes = nil file_proto_logtail_proto_goTypes = nil
file_logtail_proto_depIdxs = nil file_proto_logtail_proto_depIdxs = nil
} }

View File

@@ -2,7 +2,7 @@
// versions: // versions:
// - protoc-gen-go-grpc v1.6.1 // - protoc-gen-go-grpc v1.6.1
// - protoc v3.21.12 // - protoc v3.21.12
// source: logtail.proto // source: proto/logtail.proto
package logtailpb package logtailpb
@@ -23,6 +23,7 @@ const (
LogtailService_Trend_FullMethodName = "/logtail.LogtailService/Trend" LogtailService_Trend_FullMethodName = "/logtail.LogtailService/Trend"
LogtailService_StreamSnapshots_FullMethodName = "/logtail.LogtailService/StreamSnapshots" LogtailService_StreamSnapshots_FullMethodName = "/logtail.LogtailService/StreamSnapshots"
LogtailService_ListTargets_FullMethodName = "/logtail.LogtailService/ListTargets" LogtailService_ListTargets_FullMethodName = "/logtail.LogtailService/ListTargets"
LogtailService_DumpSnapshots_FullMethodName = "/logtail.LogtailService/DumpSnapshots"
) )
// LogtailServiceClient is the client API for LogtailService service. // LogtailServiceClient is the client API for LogtailService service.
@@ -33,6 +34,7 @@ type LogtailServiceClient interface {
Trend(ctx context.Context, in *TrendRequest, opts ...grpc.CallOption) (*TrendResponse, error) Trend(ctx context.Context, in *TrendRequest, opts ...grpc.CallOption) (*TrendResponse, error)
StreamSnapshots(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Snapshot], error) StreamSnapshots(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Snapshot], error)
ListTargets(ctx context.Context, in *ListTargetsRequest, opts ...grpc.CallOption) (*ListTargetsResponse, error) ListTargets(ctx context.Context, in *ListTargetsRequest, opts ...grpc.CallOption) (*ListTargetsResponse, error)
DumpSnapshots(ctx context.Context, in *DumpSnapshotsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Snapshot], error)
} }
type logtailServiceClient struct { type logtailServiceClient struct {
@@ -92,6 +94,25 @@ func (c *logtailServiceClient) ListTargets(ctx context.Context, in *ListTargetsR
return out, nil return out, nil
} }
func (c *logtailServiceClient) DumpSnapshots(ctx context.Context, in *DumpSnapshotsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Snapshot], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &LogtailService_ServiceDesc.Streams[1], LogtailService_DumpSnapshots_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[DumpSnapshotsRequest, Snapshot]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type LogtailService_DumpSnapshotsClient = grpc.ServerStreamingClient[Snapshot]
// LogtailServiceServer is the server API for LogtailService service. // LogtailServiceServer is the server API for LogtailService service.
// All implementations must embed UnimplementedLogtailServiceServer // All implementations must embed UnimplementedLogtailServiceServer
// for forward compatibility. // for forward compatibility.
@@ -100,6 +121,7 @@ type LogtailServiceServer interface {
Trend(context.Context, *TrendRequest) (*TrendResponse, error) Trend(context.Context, *TrendRequest) (*TrendResponse, error)
StreamSnapshots(*SnapshotRequest, grpc.ServerStreamingServer[Snapshot]) error StreamSnapshots(*SnapshotRequest, grpc.ServerStreamingServer[Snapshot]) error
ListTargets(context.Context, *ListTargetsRequest) (*ListTargetsResponse, error) ListTargets(context.Context, *ListTargetsRequest) (*ListTargetsResponse, error)
DumpSnapshots(*DumpSnapshotsRequest, grpc.ServerStreamingServer[Snapshot]) error
mustEmbedUnimplementedLogtailServiceServer() mustEmbedUnimplementedLogtailServiceServer()
} }
@@ -122,6 +144,9 @@ func (UnimplementedLogtailServiceServer) StreamSnapshots(*SnapshotRequest, grpc.
func (UnimplementedLogtailServiceServer) ListTargets(context.Context, *ListTargetsRequest) (*ListTargetsResponse, error) { func (UnimplementedLogtailServiceServer) ListTargets(context.Context, *ListTargetsRequest) (*ListTargetsResponse, error) {
return nil, status.Error(codes.Unimplemented, "method ListTargets not implemented") return nil, status.Error(codes.Unimplemented, "method ListTargets not implemented")
} }
func (UnimplementedLogtailServiceServer) DumpSnapshots(*DumpSnapshotsRequest, grpc.ServerStreamingServer[Snapshot]) error {
return status.Error(codes.Unimplemented, "method DumpSnapshots not implemented")
}
func (UnimplementedLogtailServiceServer) mustEmbedUnimplementedLogtailServiceServer() {} func (UnimplementedLogtailServiceServer) mustEmbedUnimplementedLogtailServiceServer() {}
func (UnimplementedLogtailServiceServer) testEmbeddedByValue() {} func (UnimplementedLogtailServiceServer) testEmbeddedByValue() {}
@@ -208,6 +233,17 @@ func _LogtailService_ListTargets_Handler(srv interface{}, ctx context.Context, d
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _LogtailService_DumpSnapshots_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(DumpSnapshotsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(LogtailServiceServer).DumpSnapshots(m, &grpc.GenericServerStream[DumpSnapshotsRequest, Snapshot]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type LogtailService_DumpSnapshotsServer = grpc.ServerStreamingServer[Snapshot]
// LogtailService_ServiceDesc is the grpc.ServiceDesc for LogtailService service. // LogtailService_ServiceDesc is the grpc.ServiceDesc for LogtailService service.
// It's only intended for direct use with grpc.RegisterService, // It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy) // and not to be introspected or modified (even as a copy)
@@ -234,6 +270,11 @@ var LogtailService_ServiceDesc = grpc.ServiceDesc{
Handler: _LogtailService_StreamSnapshots_Handler, Handler: _LogtailService_StreamSnapshots_Handler,
ServerStreams: true, ServerStreams: true,
}, },
{
StreamName: "DumpSnapshots",
Handler: _LogtailService_DumpSnapshots_Handler,
ServerStreams: true,
},
}, },
Metadata: "logtail.proto", Metadata: "proto/logtail.proto",
} }