Introduces a static-binary build and Debian package (amd64/arm64) with version/commit/date stamped via -ldflags. Ships section-1 manpages for ctool, ctfetch, and ctail. Adds a `version` subcommand reachable as `ctool version`, `ctool -version`, `ctool --version`, `ctool fetch version`, `ctool tail version`, and via the ctfetch/ctail symlinks. Adds tests covering the dispatcher, fetch/tail argument parsing, and the formatter/helper functions. Adds a retrofit design document modelled on the vpp-maglev one, with FRs and NFRs for each tool and the dispatcher. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
614 lines
18 KiB
Go
614 lines
18 KiB
Go
// Package utils provides shared functionality for dumping CT log tile entries.
|
|
// (C) Copyright 2026 Pim van Pelt <pim@ipng.ch>
|
|
package utils
|
|
|
|
import (
|
|
"bytes"
|
|
"compress/gzip"
|
|
"crypto/sha256"
|
|
"crypto/x509"
|
|
"encoding/asn1"
|
|
"encoding/base64"
|
|
"encoding/binary"
|
|
"encoding/hex"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"net/http"
|
|
"os"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
"filippo.io/sunlight"
|
|
)
|
|
|
|
var (
	// ctLogCache memoizes FetchCTLogList results keyed by log-list URL, so
	// the list is downloaded at most once per process.
	ctLogCache   = map[string]map[string]CTLogInfo{}
	ctLogCacheMu sync.Mutex

	// issuerCache memoizes fetchIssuer results keyed by full issuer URL.
	issuerCache   = map[string]*IssuerInfo{}
	issuerCacheMu sync.Mutex
)

var (
	// oidSCTList identifies the embedded SCT list certificate extension
	// (RFC 6962, 1.3.6.1.4.1.11129.2.4.2).
	oidSCTList = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
	// oidCTPoison identifies the precertificate poison extension
	// (RFC 6962, 1.3.6.1.4.1.11129.2.4.3).
	oidCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
)
|
|
|
|
// CTLogInfo holds details about a CT log from the log list.
type CTLogInfo struct {
	Description string `json:"description"`
	URL         string `json:"url"`      // best-available URL chosen by FetchCTLogList (url, then monitoring, then submission)
	Operator    string `json:"operator"` // operator name from the log list
	State       string `json:"state"`    // first key of the log's state object (see FetchCTLogList)
}

// SCT represents a Signed Certificate Timestamp.
type SCT struct {
	Version        int        `json:"version"`
	LogID          string     `json:"log_id"`          // hex-encoded 32-byte log ID
	Timestamp      int64      `json:"timestamp"`       // milliseconds since Unix epoch
	TimestampHuman string     `json:"timestamp_human"` // RFC 3339, UTC
	Extensions     string     `json:"extensions,omitempty"` // hex-encoded, empty when absent
	HashAlgorithm  int        `json:"hash_algorithm"`
	SigAlgorithm   int        `json:"sig_algorithm"`
	Signature      string     `json:"signature"` // hex-encoded
	CTLog          *CTLogInfo `json:"ctlog,omitempty"` // populated when Options.ShowCTLog matches the log ID
}
|
|
|
|
// maxCompressRatio caps how far Decompress will expand gzip input relative
// to its compressed size, guarding against decompression bombs.
const maxCompressRatio = 100

// Options controls which optional fields are fetched and included in output.
type Options struct {
	LogURL     string // base URL of the CT log, used for /issuer/<fp> fetches
	ShowSCT    bool   // extract embedded SCTs from final (non-precert) certificates
	ShowIssuer bool   // fetch and include issuer certificate details
	ShowCTLog  bool   // annotate SCTs with metadata from CTLogs
	CTLogs     map[string]CTLogInfo // keyed by hex log_id
}
|
|
|
|
// IssuerInfo holds parsed details of an issuer certificate fetched from the log.
type IssuerInfo struct {
	Fingerprint  string `json:"fingerprint"` // hex fingerprint used in the /issuer/<fp> URL
	Subject      string `json:"subject"`
	Issuer       string `json:"issuer"`
	NotBefore    string `json:"not_before"` // RFC 3339, UTC
	NotAfter     string `json:"not_after"`  // RFC 3339, UTC
	SerialNumber string `json:"serial_number"`
}

// CertDetails holds fields parsed from the certificate beyond what TrimmedEntry provides.
type CertDetails struct {
	NotBefore             string   `json:"not_before"` // RFC 3339, UTC
	NotAfter              string   `json:"not_after"`  // RFC 3339, UTC
	SerialNumber          string   `json:"serial_number"`
	Issuer                string   `json:"issuer"`
	EmailSANs             []string `json:"email_sans,omitempty"`
	URISANs               []string `json:"uri_sans,omitempty"`
	SubjectKeyID          string   `json:"subject_key_id,omitempty"`   // hex
	AuthorityKeyID        string   `json:"authority_key_id,omitempty"` // hex
	OCSPServers           []string `json:"ocsp_servers,omitempty"`
	IssuingCertURLs       []string `json:"issuing_cert_urls,omitempty"`
	CRLDistributionPoints []string `json:"crl_distribution_points,omitempty"`
	KeyUsage              []string `json:"key_usage,omitempty"`     // names from keyUsageNames
	ExtKeyUsage           []string `json:"ext_key_usage,omitempty"` // names from extKeyUsageNames
	IsCA                  bool     `json:"is_ca"`
	// PoisonExtension is true when the RFC 6962 precertificate poison
	// extension is present on the certificate.
	PoisonExtension bool `json:"poison_extension,omitempty"`
}
|
|
|
|
// keyUsageNames maps each x509.KeyUsage bit to a human-readable name.
// Kept as an ordered slice (not a map) so parseCertDetails emits names in
// a deterministic order.
var keyUsageNames = []struct {
	usage x509.KeyUsage
	name  string
}{
	{x509.KeyUsageDigitalSignature, "DigitalSignature"},
	{x509.KeyUsageContentCommitment, "ContentCommitment"},
	{x509.KeyUsageKeyEncipherment, "KeyEncipherment"},
	{x509.KeyUsageDataEncipherment, "DataEncipherment"},
	{x509.KeyUsageKeyAgreement, "KeyAgreement"},
	{x509.KeyUsageCertSign, "CertSign"},
	{x509.KeyUsageCRLSign, "CRLSign"},
	{x509.KeyUsageEncipherOnly, "EncipherOnly"},
	{x509.KeyUsageDecipherOnly, "DecipherOnly"},
}

// extKeyUsageNames maps extended key usage values to human-readable names.
// Usages not present here are silently skipped by parseCertDetails.
var extKeyUsageNames = map[x509.ExtKeyUsage]string{
	x509.ExtKeyUsageAny:                            "Any",
	x509.ExtKeyUsageServerAuth:                     "ServerAuth",
	x509.ExtKeyUsageClientAuth:                     "ClientAuth",
	x509.ExtKeyUsageCodeSigning:                    "CodeSigning",
	x509.ExtKeyUsageEmailProtection:                "EmailProtection",
	x509.ExtKeyUsageIPSECEndSystem:                 "IPSECEndSystem",
	x509.ExtKeyUsageIPSECTunnel:                    "IPSECTunnel",
	x509.ExtKeyUsageIPSECUser:                      "IPSECUser",
	x509.ExtKeyUsageTimeStamping:                   "TimeStamping",
	x509.ExtKeyUsageOCSPSigning:                    "OCSPSigning",
	x509.ExtKeyUsageMicrosoftServerGatedCrypto:     "MicrosoftServerGatedCrypto",
	x509.ExtKeyUsageNetscapeServerGatedCrypto:      "NetscapeServerGatedCrypto",
	x509.ExtKeyUsageMicrosoftCommercialCodeSigning: "MicrosoftCommercialCodeSigning",
	x509.ExtKeyUsageMicrosoftKernelCodeSigning:     "MicrosoftKernelCodeSigning",
}
|
|
|
|
// Entry represents a CT log entry in JSON format.
type Entry struct {
	EntryNumber        int             `json:"entry_number"`     // position within the tile
	LeafIndex          int64           `json:"leaf_index"`       // index within the whole log
	MerkleLeafHash     string          `json:"merkle_leaf_hash"` // hex SHA-256(0x00 || leaf), per RFC 6962
	Timestamp          int64           `json:"timestamp"`        // milliseconds since Unix epoch
	TimestampHuman     string          `json:"timestamp_human"`  // RFC 3339, UTC
	IsPrecert          bool            `json:"is_precert"`
	IssuerKeyHash      string          `json:"issuer_key_hash,omitempty"` // hex; set only for precerts
	CertificateSize    int             `json:"certificate_size"`
	PreCertificateSize *int            `json:"precertificate_size,omitempty"`
	ChainFingerprints  []string        `json:"chain_fingerprints"`         // hex-encoded
	Issuers            []IssuerInfo    `json:"issuers,omitempty"`          // populated when Options.ShowIssuer is set
	SCTs               []SCT           `json:"scts,omitempty"`             // populated when Options.ShowSCT is set
	CertDetails        *CertDetails    `json:"cert_details,omitempty"`
	ParsedCertInfo     json.RawMessage `json:"parsed_cert_info,omitempty"` // raw JSON of the sunlight trimmed entry
}

// HashTileOutput represents a hash tile in JSON format.
type HashTileOutput struct {
	NumHashes int      `json:"num_hashes"`
	Hashes    []string `json:"hashes"` // hex-encoded 32-byte hashes
}

// DumpResult is the result of dumping entries or hashes from a tile.
// dumpDataTile populates Entries/TotalEntries; dumpHashTile populates HashTile.
type DumpResult struct {
	Entries      []Entry         `json:"entries,omitempty"`
	HashTile     *HashTileOutput `json:"hash_tile,omitempty"`
	TotalEntries int             `json:"total_entries,omitempty"`
}
|
|
|
|
// httpClient is the shared client for all outbound requests. The previous
// code used http.Get (the zero-timeout default client), so a stalled server
// could hang the process forever; a generous timeout bounds each request
// while still allowing large tile downloads on slow links.
var httpClient = &http.Client{Timeout: 5 * time.Minute}

// FetchURL fetches data from a URL, logging the request to stderr.
//
// On a non-200 response it returns an error whose message is exactly
// "HTTP <code>"; FetchTile relies on that format to detect 404s, so do
// not change it without updating FetchTile.
func FetchURL(url string) ([]byte, error) {
	fmt.Fprintf(os.Stderr, "Fetching: %s\n", url)
	resp, err := httpClient.Get(url)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
	}

	return io.ReadAll(resp.Body)
}
|
|
|
|
// FetchTile fetches a tile from a URL, falling back from partial to full tile on 404.
|
|
func FetchTile(url string) ([]byte, error) {
|
|
data, err := FetchURL(url)
|
|
if err == nil {
|
|
return data, nil
|
|
}
|
|
// On 404, try stripping the partial-tile suffix (.p/NNN)
|
|
if err.Error() == "HTTP 404" {
|
|
if idx := strings.Index(url, ".p/"); idx != -1 {
|
|
return FetchURL(url[:idx])
|
|
}
|
|
}
|
|
return nil, err
|
|
}
|
|
|
|
// Decompress decompresses gzip-compressed data, or returns the data as-is if not compressed.
|
|
func Decompress(data []byte) ([]byte, error) {
|
|
r, err := gzip.NewReader(bytes.NewReader(data))
|
|
if err != nil {
|
|
// Not gzipped, return as-is
|
|
return data, nil
|
|
}
|
|
maxSize := int64(len(data)) * maxCompressRatio
|
|
return io.ReadAll(io.LimitReader(r, maxSize))
|
|
}
|
|
|
|
// DumpAllEntries reads and returns all entries from tile data as JSON-serializable structures.
|
|
// Automatically detects if the tile is a data tile or hash tile.
|
|
func DumpAllEntries(tileData []byte, opts Options) (*DumpResult, error) {
|
|
// Try to read as data tile first
|
|
result, err := dumpDataTile(tileData, opts)
|
|
if err != nil {
|
|
// If it fails, try as hash tile
|
|
fmt.Fprintf(os.Stderr, "Not a data tile, trying as hash tile...\n")
|
|
return dumpHashTile(tileData, opts)
|
|
}
|
|
return result, nil
|
|
}
|
|
|
|
func dumpDataTile(tileData []byte, opts Options) (*DumpResult, error) {
|
|
entryNum := 0
|
|
var entries []Entry
|
|
for len(tileData) > 0 {
|
|
e, remaining, err := sunlight.ReadTileLeaf(tileData)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to read entry %d: %w", entryNum, err)
|
|
}
|
|
tileData = remaining
|
|
|
|
entry := convertEntry(e, entryNum, opts)
|
|
entries = append(entries, entry)
|
|
entryNum++
|
|
}
|
|
|
|
return &DumpResult{
|
|
Entries: entries,
|
|
TotalEntries: entryNum,
|
|
}, nil
|
|
}
|
|
|
|
func dumpHashTile(tileData []byte, opts Options) (*DumpResult, error) {
|
|
if opts.ShowSCT || opts.ShowIssuer || opts.ShowCTLog {
|
|
return nil, fmt.Errorf("+sct, +issuer, +ctlog, and +all are not valid for hash tiles (only data tiles contain certificates)")
|
|
}
|
|
|
|
const hashSize = 32 // SHA-256 hash size
|
|
|
|
if len(tileData)%hashSize != 0 {
|
|
return nil, fmt.Errorf("invalid hash tile: size %d is not a multiple of %d", len(tileData), hashSize)
|
|
}
|
|
|
|
numHashes := len(tileData) / hashSize
|
|
hashes := make([]string, numHashes)
|
|
|
|
for i := 0; i < numHashes; i++ {
|
|
hash := tileData[i*hashSize : (i+1)*hashSize]
|
|
hashes[i] = hex.EncodeToString(hash)
|
|
}
|
|
|
|
return &DumpResult{
|
|
HashTile: &HashTileOutput{
|
|
NumHashes: numHashes,
|
|
Hashes: hashes,
|
|
},
|
|
}, nil
|
|
}
|
|
|
|
// DumpEntryAtPosition reads and returns a specific entry at the given position.
|
|
func DumpEntryAtPosition(tileData []byte, position int, expectedIndex int64, opts Options) (*Entry, error) {
|
|
entryNum := 0
|
|
for len(tileData) > 0 {
|
|
e, remaining, err := sunlight.ReadTileLeaf(tileData)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to read entry %d: %w", entryNum, err)
|
|
}
|
|
tileData = remaining
|
|
|
|
if entryNum == position {
|
|
if e.LeafIndex != expectedIndex {
|
|
fmt.Fprintf(os.Stderr, "WARNING: Expected leaf index %d but found %d at position %d\n",
|
|
expectedIndex, e.LeafIndex, position)
|
|
}
|
|
entry := convertEntry(e, entryNum, opts)
|
|
return &entry, nil
|
|
}
|
|
entryNum++
|
|
}
|
|
|
|
return nil, fmt.Errorf("position %d not found in tile (only %d entries)", position, entryNum)
|
|
}
|
|
|
|
// parseEmbeddedSCTs extracts SCTs from the SCT list extension of a DER-encoded certificate.
|
|
func parseEmbeddedSCTs(certDER []byte) ([]SCT, error) {
|
|
cert, err := x509.ParseCertificate(certDER)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("parse certificate: %w", err)
|
|
}
|
|
|
|
for _, ext := range cert.Extensions {
|
|
if !ext.Id.Equal(oidSCTList) {
|
|
continue
|
|
}
|
|
// ext.Value is the DER encoding of the extension value, which is an OCTET STRING
|
|
// wrapping the TLS-encoded SCTList.
|
|
var inner []byte
|
|
if rest, err := asn1.Unmarshal(ext.Value, &inner); err != nil || len(rest) != 0 {
|
|
return nil, fmt.Errorf("unmarshal SCT extension: %w", err)
|
|
}
|
|
return parseSCTList(inner)
|
|
}
|
|
return nil, nil
|
|
}
|
|
|
|
// parseSCTList parses a TLS-encoded SignedCertificateTimestampList.
|
|
func parseSCTList(data []byte) ([]SCT, error) {
|
|
if len(data) < 2 {
|
|
return nil, fmt.Errorf("SCT list too short")
|
|
}
|
|
listLen := int(binary.BigEndian.Uint16(data[:2]))
|
|
data = data[2:]
|
|
if len(data) < listLen {
|
|
return nil, fmt.Errorf("SCT list truncated")
|
|
}
|
|
data = data[:listLen]
|
|
|
|
var scts []SCT
|
|
for len(data) > 0 {
|
|
if len(data) < 2 {
|
|
return nil, fmt.Errorf("SCT entry length truncated")
|
|
}
|
|
sctLen := int(binary.BigEndian.Uint16(data[:2]))
|
|
data = data[2:]
|
|
if len(data) < sctLen {
|
|
return nil, fmt.Errorf("SCT entry truncated")
|
|
}
|
|
sct, err := parseSCT(data[:sctLen])
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
scts = append(scts, sct)
|
|
data = data[sctLen:]
|
|
}
|
|
return scts, nil
|
|
}
|
|
|
|
// parseSCT parses a single v1 SCT from raw bytes.
//
// Wire layout (RFC 6962 section 3.2):
//
//	version(1) || log_id(32) || timestamp(8) || ext_len(2) || extensions ||
//	hash_alg(1) || sig_alg(1) || sig_len(2) || signature
func parseSCT(data []byte) (SCT, error) {
	// version(1) + log_id(32) + timestamp(8) + ext_len(2) = 43 bytes minimum
	if len(data) < 43 {
		return SCT{}, fmt.Errorf("SCT too short: %d bytes", len(data))
	}
	version := int(data[0])
	logID := hex.EncodeToString(data[1:33])
	ts := int64(binary.BigEndian.Uint64(data[33:41]))
	extLen := int(binary.BigEndian.Uint16(data[41:43]))
	pos := 43
	// The +4 reserves room for hash_alg(1) + sig_alg(1) + sig_len(2)
	// following the extensions.
	if len(data) < pos+extLen+4 {
		return SCT{}, fmt.Errorf("SCT extensions/signature truncated")
	}
	extensions := ""
	if extLen > 0 {
		extensions = hex.EncodeToString(data[pos : pos+extLen])
	}
	pos += extLen
	hashAlg := int(data[pos])
	sigAlg := int(data[pos+1])
	sigLen := int(binary.BigEndian.Uint16(data[pos+2 : pos+4]))
	pos += 4
	if len(data) < pos+sigLen {
		return SCT{}, fmt.Errorf("SCT signature truncated")
	}
	sig := hex.EncodeToString(data[pos : pos+sigLen])

	return SCT{
		Version:        version,
		LogID:          logID,
		Timestamp:      ts, // milliseconds since Unix epoch
		TimestampHuman: time.UnixMilli(ts).UTC().Format(time.RFC3339),
		Extensions:     extensions,
		HashAlgorithm:  hashAlg,
		SigAlgorithm:   sigAlg,
		Signature:      sig,
	}, nil
}
|
|
|
|
// FetchCTLogList fetches the CT log list JSON and returns a map keyed by hex log_id.
|
|
// Results are cached by URL so the network is only hit once per process.
|
|
func FetchCTLogList(url string) (map[string]CTLogInfo, error) {
|
|
ctLogCacheMu.Lock()
|
|
if cached, ok := ctLogCache[url]; ok {
|
|
ctLogCacheMu.Unlock()
|
|
return cached, nil
|
|
}
|
|
ctLogCacheMu.Unlock()
|
|
|
|
data, err := FetchURL(url)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
type logEntry struct {
|
|
Description string `json:"description"`
|
|
LogID string `json:"log_id"`
|
|
URL string `json:"url"`
|
|
SubmissionURL string `json:"submission_url"`
|
|
MonitoringURL string `json:"monitoring_url"`
|
|
State map[string]json.RawMessage `json:"state"`
|
|
}
|
|
var list struct {
|
|
Operators []struct {
|
|
Name string `json:"name"`
|
|
Logs []logEntry `json:"logs"`
|
|
TiledLogs []logEntry `json:"tiled_logs"`
|
|
} `json:"operators"`
|
|
}
|
|
if err := json.Unmarshal(data, &list); err != nil {
|
|
return nil, fmt.Errorf("parse log list: %w", err)
|
|
}
|
|
|
|
result := make(map[string]CTLogInfo)
|
|
for _, op := range list.Operators {
|
|
for _, log := range append(append([]logEntry{}, op.Logs...), op.TiledLogs...) {
|
|
raw, err := base64.StdEncoding.DecodeString(log.LogID)
|
|
if err != nil {
|
|
continue
|
|
}
|
|
hexID := hex.EncodeToString(raw)
|
|
state := ""
|
|
for k := range log.State {
|
|
state = k
|
|
break
|
|
}
|
|
url := log.URL
|
|
if url == "" {
|
|
url = log.MonitoringURL
|
|
}
|
|
if url == "" {
|
|
url = log.SubmissionURL
|
|
}
|
|
result[hexID] = CTLogInfo{
|
|
Description: log.Description,
|
|
URL: url,
|
|
Operator: op.Name,
|
|
State: state,
|
|
}
|
|
}
|
|
}
|
|
ctLogCacheMu.Lock()
|
|
ctLogCache[url] = result
|
|
ctLogCacheMu.Unlock()
|
|
|
|
return result, nil
|
|
}
|
|
|
|
// fetchIssuer fetches the issuer certificate at /issuer/<fingerprint> and returns parsed info.
|
|
// Results are cached by URL so the same issuer is only fetched once per process.
|
|
func fetchIssuer(logURL, fingerprint string) (*IssuerInfo, error) {
|
|
url := logURL + "/issuer/" + fingerprint
|
|
|
|
issuerCacheMu.Lock()
|
|
if cached, ok := issuerCache[url]; ok {
|
|
issuerCacheMu.Unlock()
|
|
return cached, nil
|
|
}
|
|
issuerCacheMu.Unlock()
|
|
|
|
data, err := FetchURL(url)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
cert, err := x509.ParseCertificate(data)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("parse issuer cert: %w", err)
|
|
}
|
|
info := &IssuerInfo{
|
|
Fingerprint: fingerprint,
|
|
Subject: cert.Subject.String(),
|
|
Issuer: cert.Issuer.String(),
|
|
NotBefore: cert.NotBefore.UTC().Format(time.RFC3339),
|
|
NotAfter: cert.NotAfter.UTC().Format(time.RFC3339),
|
|
SerialNumber: cert.SerialNumber.String(),
|
|
}
|
|
|
|
issuerCacheMu.Lock()
|
|
issuerCache[url] = info
|
|
issuerCacheMu.Unlock()
|
|
|
|
return info, nil
|
|
}
|
|
|
|
// parseCertDetails extracts certificate fields not covered by TrimmedEntry.
|
|
func parseCertDetails(certDER []byte) *CertDetails {
|
|
// Proceed as long as a cert was returned, even if there are unhandled
|
|
// critical extensions (e.g. the CT poison extension in precertificates).
|
|
cert, _ := x509.ParseCertificate(certDER)
|
|
if cert == nil {
|
|
return nil
|
|
}
|
|
|
|
d := &CertDetails{
|
|
NotBefore: cert.NotBefore.UTC().Format(time.RFC3339),
|
|
NotAfter: cert.NotAfter.UTC().Format(time.RFC3339),
|
|
SerialNumber: cert.SerialNumber.String(),
|
|
Issuer: cert.Issuer.String(),
|
|
OCSPServers: cert.OCSPServer,
|
|
IssuingCertURLs: cert.IssuingCertificateURL,
|
|
CRLDistributionPoints: cert.CRLDistributionPoints,
|
|
IsCA: cert.IsCA,
|
|
}
|
|
|
|
d.EmailSANs = append(d.EmailSANs, cert.EmailAddresses...)
|
|
for _, u := range cert.URIs {
|
|
d.URISANs = append(d.URISANs, u.String())
|
|
}
|
|
if len(cert.SubjectKeyId) > 0 {
|
|
d.SubjectKeyID = hex.EncodeToString(cert.SubjectKeyId)
|
|
}
|
|
if len(cert.AuthorityKeyId) > 0 {
|
|
d.AuthorityKeyID = hex.EncodeToString(cert.AuthorityKeyId)
|
|
}
|
|
for _, ku := range keyUsageNames {
|
|
if cert.KeyUsage&ku.usage != 0 {
|
|
d.KeyUsage = append(d.KeyUsage, ku.name)
|
|
}
|
|
}
|
|
for _, eku := range cert.ExtKeyUsage {
|
|
if name, ok := extKeyUsageNames[eku]; ok {
|
|
d.ExtKeyUsage = append(d.ExtKeyUsage, name)
|
|
}
|
|
}
|
|
for _, ext := range cert.Extensions {
|
|
if ext.Id.Equal(oidCTPoison) {
|
|
d.PoisonExtension = true
|
|
break
|
|
}
|
|
}
|
|
return d
|
|
}
|
|
|
|
// merkleLeafHash computes the RFC 6962 Merkle leaf hash: SHA-256(0x00 || leaf).
|
|
func merkleLeafHash(leaf []byte) string {
|
|
h := sha256.New()
|
|
h.Write([]byte{0x00})
|
|
h.Write(leaf)
|
|
return hex.EncodeToString(h.Sum(nil))
|
|
}
|
|
|
|
// convertEntry converts a sunlight.LogEntry into the JSON-serializable
// Entry, applying the optional enrichments selected in opts (issuer
// fetching, embedded-SCT extraction, CT-log annotation).
func convertEntry(e *sunlight.LogEntry, entryNum int, opts Options) Entry {
	entry := Entry{
		EntryNumber:     entryNum,
		LeafIndex:       e.LeafIndex,
		MerkleLeafHash:  merkleLeafHash(e.MerkleTreeLeaf()),
		Timestamp:       e.Timestamp,
		TimestampHuman:  time.UnixMilli(e.Timestamp).UTC().Format(time.RFC3339),
		IsPrecert:       e.IsPrecert,
		CertificateSize: len(e.Certificate),
	}

	// Precert entries carry the issuer key hash instead of a final cert.
	if e.IsPrecert {
		entry.IssuerKeyHash = hex.EncodeToString(e.IssuerKeyHash[:])
	}

	if e.PreCertificate != nil {
		size := len(e.PreCertificate)
		entry.PreCertificateSize = &size
	}

	// Convert chain fingerprints to hex strings and optionally fetch issuer details.
	entry.ChainFingerprints = make([]string, len(e.ChainFingerprints))
	for i, fp := range e.ChainFingerprints {
		entry.ChainFingerprints[i] = hex.EncodeToString(fp[:])
	}
	if opts.ShowIssuer && opts.LogURL != "" {
		for _, fp := range entry.ChainFingerprints {
			info, err := fetchIssuer(opts.LogURL, fp)
			if err != nil {
				// Best effort: warn and continue so one failed issuer
				// fetch does not drop the whole entry.
				fmt.Fprintf(os.Stderr, "WARNING: could not fetch issuer %s: %v\n", fp, err)
				continue
			}
			entry.Issuers = append(entry.Issuers, *info)
		}
	}

	// Optionally extract embedded SCTs from final (non-precert) certificates.
	// SCT extraction errors are deliberately swallowed: the entry is still
	// emitted, just without SCTs.
	if opts.ShowSCT && !e.IsPrecert && len(e.Certificate) > 0 {
		if scts, err := parseEmbeddedSCTs(e.Certificate); err == nil {
			if opts.ShowCTLog {
				// Annotate each SCT whose log ID appears in the log list.
				for i := range scts {
					if info, ok := opts.CTLogs[scts[i].LogID]; ok {
						scts[i].CTLog = &info
					}
				}
			}
			entry.SCTs = scts
		}
	}

	// Parse extended certificate details.
	certDER := e.Certificate
	if e.IsPrecert {
		certDER = e.PreCertificate
	}
	if len(certDER) > 0 {
		entry.CertDetails = parseCertDetails(certDER)
	}

	// Try to extract parsed certificate info
	if trimmed, err := e.TrimmedEntry(); err == nil {
		if data, err := json.Marshal(trimmed); err == nil {
			entry.ParsedCertInfo = data
		}
	}

	return entry
}
|