Compare commits
10 Commits
07c372e6d2
...
f1ee4722c2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f1ee4722c2 | ||
|
|
fa6df14ce7 | ||
|
|
7a8e469baa | ||
|
|
0b687bf9d9 | ||
|
|
99ad6fbff1 | ||
|
|
60a149b669 | ||
|
|
16fa899b91 | ||
|
|
7829000c55 | ||
|
|
11fbbd4b42 | ||
|
|
2274372119 |
1
Makefile
1
Makefile
@@ -27,6 +27,7 @@ clean:
|
||||
@echo "Cleaning build artifacts..."
|
||||
rm -f s3-genindex
|
||||
rm -f coverage.out coverage.html
|
||||
find . -name index.html -delete
|
||||
@echo "Clean complete"
|
||||
|
||||
# Wipe everything including test caches
|
||||
|
||||
34
README.md
34
README.md
@@ -1,6 +1,19 @@
|
||||
# s3-genindex
|
||||
|
||||
Generate HTML directory indexes with file type icons and responsive design.
|
||||
Generate HTML directory indexes with file type icons and responsive design for local directories and S3-compatible storage.
|
||||
This is particularly useful for S3 buckets that are publicly readable.
|
||||
|
||||

|
||||
|
||||
## Features
|
||||
|
||||
- **Local directory indexing** with recursive traversal
|
||||
- **S3-compatible storage support** (MinIO, AWS S3, etc.)
|
||||
- **Hierarchical directory structure** for S3 buckets
|
||||
- **Responsive HTML design** with file type icons
|
||||
- **Dry run mode** for testing
|
||||
- **Flexible filtering** with glob patterns and regex exclusion
|
||||
- **Hidden file control** and index.html visibility options
|
||||
|
||||
## Install
|
||||
|
||||
@@ -8,17 +21,20 @@ Generate HTML directory indexes with file type icons and responsive design.
|
||||
go install git.ipng.ch/ipng/s3-genindex/cmd/s3-genindex@latest
|
||||
```
|
||||
|
||||
## Usage
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Generate index.html in current directory
|
||||
s3-genindex
|
||||
# Local directory
|
||||
s3-genindex -d /path/to/dir
|
||||
|
||||
# Generate recursively with custom output
|
||||
s3-genindex -r -o listing.html /path/to/dir
|
||||
# S3 bucket (requires AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY)
|
||||
s3-genindex -s3 http://minio.example.com:9000/bucket
|
||||
|
||||
# Exclude files by regex
|
||||
s3-genindex -x "(build|node_modules|\.tmp)"
|
||||
# Dry run to see what would be generated
|
||||
s3-genindex -d /path/to/dir -n
|
||||
|
||||
# Show index.html files in listings
|
||||
s3-genindex -d /path/to/dir -i
|
||||
```
|
||||
|
||||
## Build
|
||||
@@ -28,4 +44,4 @@ make build
|
||||
make test
|
||||
```
|
||||
|
||||
See [docs/DETAILS.md](docs/DETAILS.md) for complete documentation.
|
||||
See [docs/DETAILS.md](docs/DETAILS.md) for complete documentation and examples.
|
||||
|
||||
@@ -28,6 +28,13 @@ type S3Config struct {
|
||||
UseSSL bool
|
||||
}
|
||||
|
||||
// S3Object represents an S3 object
|
||||
type S3Object struct {
|
||||
Key string
|
||||
Size int64
|
||||
LastModified time.Time
|
||||
}
|
||||
|
||||
// parseS3URL parses S3 URL and extracts endpoint and bucket
|
||||
// Example: http://minio0.chbtl0.net.ipng.ch:9000/ctlog-ro
|
||||
// Returns: endpoint=minio0.chbtl0.net.ipng.ch:9000, bucket=ctlog-ro, useSSL=false
|
||||
@@ -62,15 +69,6 @@ func parseS3URL(s3URL string) (*S3Config, error) {
|
||||
|
||||
// processS3Bucket processes an S3 bucket and generates index files
|
||||
func processS3Bucket(s3Config *S3Config, opts *indexgen.Options) error {
|
||||
if opts.DryRun {
|
||||
// In dry run mode, just show what would be done without connecting
|
||||
fmt.Printf("Would connect to S3 endpoint: %s\n", s3Config.Endpoint)
|
||||
fmt.Printf("Would list objects in bucket: %s\n", s3Config.Bucket)
|
||||
fmt.Printf("Would write S3 index file: %s\n", opts.OutputFile)
|
||||
fmt.Printf("Note: Dry run mode - no actual S3 connection made\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get credentials from environment variables
|
||||
accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||
@@ -108,8 +106,8 @@ func processS3Bucket(s3Config *S3Config, opts *indexgen.Options) error {
|
||||
return fmt.Errorf("failed to list S3 objects: %w", err)
|
||||
}
|
||||
|
||||
// Convert S3 objects to FileEntry format
|
||||
var entries []indexgen.FileEntry
|
||||
// Collect all S3 objects
|
||||
var allObjects []S3Object
|
||||
for _, obj := range result.Contents {
|
||||
if obj.Key == nil {
|
||||
continue
|
||||
@@ -127,6 +125,11 @@ func processS3Bucket(s3Config *S3Config, opts *indexgen.Options) error {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip index.html files unless ShowIndexFiles is enabled
|
||||
if !opts.ShowIndexFiles && strings.HasSuffix(keyName, opts.OutputFile) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Simple glob matching for filter
|
||||
if opts.Filter != "*" && opts.Filter != "" {
|
||||
matched, err := filepath.Match(opts.Filter, keyName)
|
||||
@@ -135,101 +138,203 @@ func processS3Bucket(s3Config *S3Config, opts *indexgen.Options) error {
|
||||
}
|
||||
}
|
||||
|
||||
entry := indexgen.FileEntry{
|
||||
Name: keyName,
|
||||
Path: keyName,
|
||||
IsDir: false,
|
||||
allObjects = append(allObjects, S3Object{
|
||||
Key: keyName,
|
||||
Size: *obj.Size,
|
||||
ModTime: *obj.LastModified,
|
||||
IsSymlink: false,
|
||||
IconType: indexgen.GetIconType(keyName),
|
||||
SizePretty: indexgen.PrettySize(*obj.Size),
|
||||
ModTimeISO: obj.LastModified.Format(time.RFC3339),
|
||||
ModTimeHuman: obj.LastModified.Format(time.RFC822),
|
||||
}
|
||||
|
||||
// Set CSS class based on file type
|
||||
if entry.IsDir {
|
||||
entry.CSSClass = "dir"
|
||||
} else if entry.IsSymlink {
|
||||
entry.CSSClass = "symlink"
|
||||
} else {
|
||||
entry.CSSClass = "file"
|
||||
}
|
||||
|
||||
entries = append(entries, entry)
|
||||
LastModified: *obj.LastModified,
|
||||
})
|
||||
|
||||
if opts.Verbose {
|
||||
log.Printf("Found object: %s (%s)", entry.Name, entry.SizePretty)
|
||||
log.Printf("Found object: %s (%s)", keyName, indexgen.PrettySize(*obj.Size))
|
||||
}
|
||||
}
|
||||
|
||||
// Process hierarchical directory structure
|
||||
return processS3Hierarchy(allObjects, opts, client, s3Config)
|
||||
}
|
||||
|
||||
// processS3Hierarchy processes S3 objects hierarchically like filesystem directories
|
||||
func processS3Hierarchy(objects []S3Object, opts *indexgen.Options, client *s3.Client, s3Config *S3Config) error {
|
||||
// Group objects by directory path
|
||||
dirMap := make(map[string][]indexgen.FileEntry)
|
||||
|
||||
// Track all directory paths we need to create indexes for
|
||||
allDirs := make(map[string]bool)
|
||||
|
||||
for _, obj := range objects {
|
||||
// Split the key into directory parts
|
||||
parts := strings.Split(obj.Key, "/")
|
||||
|
||||
if len(parts) == 1 {
|
||||
// Root level file
|
||||
entry := createFileEntry(obj, obj.Key)
|
||||
dirMap[""] = append(dirMap[""], entry)
|
||||
} else {
|
||||
// File in a subdirectory
|
||||
fileName := parts[len(parts)-1]
|
||||
dirPath := strings.Join(parts[:len(parts)-1], "/")
|
||||
|
||||
// Create file entry
|
||||
entry := createFileEntry(obj, fileName)
|
||||
dirMap[dirPath] = append(dirMap[dirPath], entry)
|
||||
|
||||
// Track all parent directories
|
||||
currentPath := ""
|
||||
for i, part := range parts[:len(parts)-1] {
|
||||
if i == 0 {
|
||||
currentPath = part
|
||||
} else {
|
||||
currentPath = currentPath + "/" + part
|
||||
}
|
||||
allDirs[currentPath] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add directory entries to parent directories
|
||||
for dirPath := range allDirs {
|
||||
parentPath := ""
|
||||
if strings.Contains(dirPath, "/") {
|
||||
parts := strings.Split(dirPath, "/")
|
||||
parentPath = strings.Join(parts[:len(parts)-1], "/")
|
||||
}
|
||||
|
||||
dirName := filepath.Base(dirPath)
|
||||
// Build the correct relative path for S3 (relative to current directory)
|
||||
dirEntryPath := dirName + "/"
|
||||
if opts.DirAppend {
|
||||
dirEntryPath += opts.OutputFile
|
||||
}
|
||||
|
||||
dirEntry := indexgen.FileEntry{
|
||||
Name: dirName,
|
||||
Path: dirEntryPath,
|
||||
IsDir: true,
|
||||
Size: -1,
|
||||
IsSymlink: false,
|
||||
IconType: "folder",
|
||||
CSSClass: "folder_filled",
|
||||
SizePretty: "—",
|
||||
ModTimeISO: time.Now().Format(time.RFC3339),
|
||||
ModTimeHuman: time.Now().Format(time.RFC822),
|
||||
}
|
||||
|
||||
dirMap[parentPath] = append(dirMap[parentPath], dirEntry)
|
||||
}
|
||||
|
||||
// Set TopDir to bucket name for template generation
|
||||
opts.TopDir = s3Config.Bucket
|
||||
|
||||
// Generate HTML from entries - need to implement this function
|
||||
return generateS3HTML(entries, opts)
|
||||
// Generate index.html for each directory
|
||||
for dirPath, entries := range dirMap {
|
||||
indexKey := dirPath
|
||||
if indexKey != "" {
|
||||
indexKey += "/"
|
||||
}
|
||||
indexKey += opts.OutputFile
|
||||
|
||||
err := generateS3HTML(entries, opts, client, s3Config, indexKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate index for %s: %w", dirPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// generateS3HTML generates HTML index for S3 objects using the existing template system
|
||||
func generateS3HTML(entries []indexgen.FileEntry, opts *indexgen.Options) error {
|
||||
// createFileEntry creates a FileEntry from an S3Object
|
||||
func createFileEntry(obj S3Object, displayName string) indexgen.FileEntry {
|
||||
return indexgen.FileEntry{
|
||||
Name: displayName,
|
||||
Path: displayName,
|
||||
IsDir: false,
|
||||
Size: obj.Size,
|
||||
ModTime: obj.LastModified,
|
||||
IsSymlink: false,
|
||||
IconType: indexgen.GetIconType(displayName),
|
||||
CSSClass: "file",
|
||||
SizePretty: indexgen.PrettySize(obj.Size),
|
||||
ModTimeISO: obj.LastModified.Format(time.RFC3339),
|
||||
ModTimeHuman: obj.LastModified.Format(time.RFC822),
|
||||
}
|
||||
}
|
||||
|
||||
// generateS3HTML generates HTML index for S3 objects and uploads to S3
|
||||
func generateS3HTML(entries []indexgen.FileEntry, opts *indexgen.Options, client *s3.Client, s3Config *S3Config, indexKey string) error {
|
||||
// Sort entries by name (similar to filesystem behavior)
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
return entries[i].Name < entries[j].Name
|
||||
})
|
||||
|
||||
// Determine output file
|
||||
outputFile := opts.OutputFile
|
||||
if outputFile == "" {
|
||||
outputFile = indexgen.DefaultOutputFile
|
||||
}
|
||||
// Use the provided index key
|
||||
|
||||
if opts.DryRun {
|
||||
// Dry run mode: show what would be written
|
||||
fmt.Printf("Would write S3 index file: %s\n", outputFile)
|
||||
fmt.Printf("S3 bucket: %s\n", opts.TopDir)
|
||||
fmt.Printf("Objects found: %d\n", len(entries))
|
||||
for _, entry := range entries {
|
||||
fmt.Printf(" object: %s (%s)\n", entry.Name, entry.SizePretty)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Normal mode: actually write the file
|
||||
// Get the HTML template
|
||||
tmpl := indexgen.GetHTMLTemplate()
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("failed to get HTML template")
|
||||
}
|
||||
|
||||
// Determine if we're at root level (no parent directory)
|
||||
isRoot := (indexKey == opts.OutputFile) // root level index.html
|
||||
|
||||
// Prepare template data (similar to ProcessDir in indexgen)
|
||||
data := struct {
|
||||
DirName string
|
||||
Entries []indexgen.FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
DirName string
|
||||
Entries []indexgen.FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
IsRoot bool
|
||||
WatermarkURL string
|
||||
}{
|
||||
DirName: opts.TopDir, // Use bucket name as directory name
|
||||
Entries: entries,
|
||||
DirAppend: opts.DirAppend,
|
||||
OutputFile: opts.OutputFile,
|
||||
DirName: opts.TopDir, // Use bucket name as directory name
|
||||
Entries: entries,
|
||||
DirAppend: opts.DirAppend,
|
||||
OutputFile: opts.OutputFile,
|
||||
IsRoot: isRoot,
|
||||
WatermarkURL: opts.WatermarkURL,
|
||||
}
|
||||
|
||||
// Create output file
|
||||
file, err := os.Create(outputFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file %s: %w", outputFile, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Execute template
|
||||
err = tmpl.Execute(file, data)
|
||||
// Generate HTML content in memory
|
||||
var htmlBuffer strings.Builder
|
||||
err := tmpl.Execute(&htmlBuffer, data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute template: %w", err)
|
||||
}
|
||||
|
||||
htmlContent := htmlBuffer.String()
|
||||
|
||||
if opts.DryRun {
|
||||
// Dry run mode: show what would be written but don't upload
|
||||
fmt.Printf("Would upload S3 index file: s3://%s/%s\n", s3Config.Bucket, indexKey)
|
||||
fmt.Printf("Directory level: %s\n", strings.TrimSuffix(indexKey, "/"+opts.OutputFile))
|
||||
fmt.Printf("Objects found: %d\n", len(entries))
|
||||
fmt.Printf("Generated HTML size: %d bytes\n", len(htmlContent))
|
||||
for _, entry := range entries {
|
||||
entryType := "file"
|
||||
if entry.IsDir {
|
||||
entryType = "directory"
|
||||
}
|
||||
fmt.Printf(" %s: %s (%s)\n", entryType, entry.Name, entry.SizePretty)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upload HTML to S3
|
||||
ctx := context.Background()
|
||||
putInput := &s3.PutObjectInput{
|
||||
Bucket: aws.String(s3Config.Bucket),
|
||||
Key: aws.String(indexKey),
|
||||
Body: strings.NewReader(htmlContent),
|
||||
ContentType: aws.String("text/html"),
|
||||
}
|
||||
|
||||
_, err = client.PutObject(ctx, putInput)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upload %s to S3: %w", indexKey, err)
|
||||
}
|
||||
|
||||
if opts.Verbose {
|
||||
log.Printf("Generated index file: %s (%d entries)", outputFile, len(entries))
|
||||
log.Printf("Uploaded index file: %s to S3 bucket %s (%d entries)", indexKey, s3Config.Bucket, len(entries))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -241,6 +346,8 @@ func main() {
|
||||
var directory string
|
||||
var s3URL string
|
||||
var dryRun bool
|
||||
var showIndexFiles bool
|
||||
var watermarkURL string
|
||||
|
||||
// Set defaults
|
||||
opts.DirAppend = true
|
||||
@@ -254,6 +361,8 @@ func main() {
|
||||
flag.BoolVar(&dryRun, "n", false, "dry run: show what would be written without actually writing")
|
||||
flag.StringVar(&excludeRegexStr, "x", "", "exclude files matching regular expression")
|
||||
flag.BoolVar(&opts.Verbose, "v", false, "verbosely list every processed file")
|
||||
flag.BoolVar(&showIndexFiles, "i", false, "show index.html files in directory listings")
|
||||
flag.StringVar(&watermarkURL, "wm", "", "watermark logo URL to display in top left corner")
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprintf(os.Stderr, "Generate directory index files (recursive is ON, hidden files included by default).\n")
|
||||
@@ -293,8 +402,10 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
// Set dry run flag
|
||||
// Set dry run, show index files, and watermark URL
|
||||
opts.DryRun = dryRun
|
||||
opts.ShowIndexFiles = showIndexFiles
|
||||
opts.WatermarkURL = watermarkURL
|
||||
|
||||
if s3URL != "" {
|
||||
// Parse S3 URL
|
||||
|
||||
247
docs/DETAILS.md
247
docs/DETAILS.md
@@ -2,18 +2,20 @@
|
||||
|
||||
## Overview
|
||||
|
||||
s3-genindex is a Go rewrite of the original Python genindex.py script. It generates HTML directory listings with file type icons, responsive design, and dark mode support.
|
||||
s3-genindex is a program that generates HTML directory listings with file type icons, responsive design, and dark mode support for both local directories and S3-compatible storage systems.
|
||||
|
||||
## Features
|
||||
|
||||
- **Local Directory Indexing**: Recursive traversal of filesystem directories
|
||||
- **S3-Compatible Storage**: Support for MinIO, AWS S3, and other S3-compatible systems
|
||||
- **Hierarchical Structure**: Creates proper directory navigation for S3 buckets
|
||||
- **File Type Detection**: Recognizes 100+ file extensions with appropriate icons
|
||||
- **Responsive Design**: Works on desktop and mobile devices
|
||||
- **Dark Mode**: Automatic dark mode support based on system preferences
|
||||
- **Recursive Processing**: Generate indexes for entire directory trees
|
||||
- **File Filtering**: Include/exclude files by pattern or regex
|
||||
- **Dry Run Mode**: Preview what would be generated without writing files
|
||||
- **File Filtering**: Include/exclude files by glob patterns or regex
|
||||
- **Symlink Support**: Special handling for symbolic links
|
||||
- **Custom Output**: Configurable output filename
|
||||
- **Breadcrumb Navigation**: Parent directory navigation
|
||||
- **Index File Control**: Show/hide index.html files in directory listings
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -34,66 +36,118 @@ go install git.ipng.ch/ipng/s3-genindex/cmd/s3-genindex@latest
|
||||
## Command Line Options
|
||||
|
||||
```
|
||||
Usage: s3-genindex [OPTIONS] [directory]
|
||||
Usage: s3-genindex [OPTIONS]
|
||||
|
||||
-d append output file to directory href
|
||||
-d string
|
||||
local directory to process
|
||||
-s3 string
|
||||
S3 URL to process
|
||||
-f string
|
||||
only include files matching glob (default "*")
|
||||
-i include dot hidden files
|
||||
-o string
|
||||
custom output file (default "index.html")
|
||||
-r recursively process nested dirs
|
||||
-i show index.html files in directory listings
|
||||
-n dry run: show what would be written without actually writing
|
||||
-v verbosely list every processed file
|
||||
-x string
|
||||
exclude files matching regular expression
|
||||
```
|
||||
|
||||
**Note**: Either `-d <directory>` or `-s3 <url>` must be specified (mutually exclusive).
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Basic Usage
|
||||
### Local Directory Processing (`-d`)
|
||||
|
||||
```bash
|
||||
# Generate index.html in current directory
|
||||
s3-genindex
|
||||
# Generate index.html for a local directory
|
||||
s3-genindex -d /var/www/html
|
||||
|
||||
# Generate index for specific directory
|
||||
s3-genindex /path/to/directory
|
||||
# Process with verbose output to see all files
|
||||
s3-genindex -d /home/user/documents -v
|
||||
|
||||
# Generate with custom output filename
|
||||
s3-genindex -o listing.html
|
||||
# Dry run to preview what would be generated
|
||||
s3-genindex -d /path/to/dir -n
|
||||
|
||||
# Show index.html files in directory listings
|
||||
s3-genindex -d /var/www -i
|
||||
```
|
||||
|
||||
### Recursive Processing
|
||||
### S3 Storage Processing (`-s3`)
|
||||
|
||||
```bash
|
||||
# Process directory tree recursively
|
||||
s3-genindex -r
|
||||
# Basic S3 bucket processing (MinIO)
|
||||
export AWS_ACCESS_KEY_ID="your-access-key"
|
||||
export AWS_SECRET_ACCESS_KEY="your-secret-key"
|
||||
s3-genindex -s3 http://minio.example.com:9000/my-bucket
|
||||
|
||||
# Process recursively with verbose output
|
||||
s3-genindex -rv /var/www
|
||||
# AWS S3 bucket processing
|
||||
export AWS_ACCESS_KEY_ID="your-aws-key"
|
||||
export AWS_SECRET_ACCESS_KEY="your-aws-secret"
|
||||
s3-genindex -s3 https://s3.amazonaws.com/my-bucket
|
||||
|
||||
# S3 with verbose output and dry run
|
||||
s3-genindex -s3 http://localhost:9000/test-bucket -v -n
|
||||
|
||||
# S3 processing with file filtering
|
||||
s3-genindex -s3 http://minio.local:9000/logs -f "*.log"
|
||||
```
|
||||
|
||||
### File Filtering
|
||||
### File Filtering Examples
|
||||
|
||||
```bash
|
||||
# Include only Python files
|
||||
s3-genindex -f "*.py"
|
||||
# Include only specific file types
|
||||
s3-genindex -d /var/log -f "*.log"
|
||||
s3-genindex -s3 http://minio:9000/images -f "*.{jpg,png,gif}"
|
||||
|
||||
# Exclude build artifacts and dependencies
|
||||
s3-genindex -x "(build|dist|node_modules|__pycache__|\\.tmp)"
|
||||
# Exclude build artifacts and temporary files
|
||||
s3-genindex -d /home/dev/project -x "(build|dist|node_modules|__pycache__|\\.tmp)"
|
||||
|
||||
# Include hidden files
|
||||
s3-genindex -i
|
||||
# Exclude version control and system files
|
||||
s3-genindex -d /var/www -x "(\.git|\.svn|\.DS_Store|Thumbs\.db)"
|
||||
|
||||
# Complex filtering with multiple patterns
|
||||
s3-genindex -d /data -f "*.{json,xml,csv}" -x "(backup|temp|cache)"
|
||||
```
|
||||
|
||||
### Advanced Usage
|
||||
### Advanced Usage Scenarios
|
||||
|
||||
```bash
|
||||
# Recursive with custom output and exclusions
|
||||
s3-genindex -r -o index.html -x "(\.git|\.svn|node_modules)" /var/www
|
||||
# Documentation site generation for local directory
|
||||
s3-genindex -d /var/www/docs -i -v
|
||||
|
||||
# Verbose processing with directory appending
|
||||
s3-genindex -r -v -d /home/user/public
|
||||
# Log file indexing on S3 with size filtering
|
||||
s3-genindex -s3 http://minio:9000/application-logs -f "*.log" -v
|
||||
|
||||
# Website asset indexing (excluding index files)
|
||||
s3-genindex -d /var/www/assets -x "(index\.html|\.htaccess)"
|
||||
|
||||
# Backup verification with dry run
|
||||
s3-genindex -s3 https://backup.s3.amazonaws.com/daily-backups -n -v
|
||||
|
||||
# Development file browsing with hidden files
|
||||
s3-genindex -d /home/dev/src -i -x "(\.git|node_modules|vendor)"
|
||||
|
||||
# Media gallery generation
|
||||
s3-genindex -d /var/media -f "*.{jpg,jpeg,png,gif,mp4,mov}" -i
|
||||
```
|
||||
|
||||
### Integration Examples
|
||||
|
||||
```bash
|
||||
# Automated documentation updates (cron job)
|
||||
#!/bin/bash
|
||||
export AWS_ACCESS_KEY_ID="docs-access-key"
|
||||
export AWS_SECRET_ACCESS_KEY="docs-secret-key"
|
||||
s3-genindex -s3 https://docs.s3.amazonaws.com/api-docs -v
|
||||
|
||||
# Local web server directory indexing
|
||||
s3-genindex -d /var/www/html -i
|
||||
nginx -s reload
|
||||
|
||||
# CI/CD artifact indexing
|
||||
s3-genindex -s3 http://artifacts.internal:9000/build-artifacts -f "*.{tar.gz,zip}" -v
|
||||
|
||||
# Photo gallery with metadata
|
||||
s3-genindex -d /var/photos -f "*.{jpg,jpeg,png,heic}" -i -v
|
||||
```
|
||||
|
||||
## File Type Support
|
||||
@@ -206,11 +260,40 @@ make lint # Run golangci-lint (if installed)
|
||||
make check # Run all quality checks
|
||||
```
|
||||
|
||||
## S3 Configuration
|
||||
|
||||
### Environment Variables for S3
|
||||
|
||||
When using S3 storage (`-s3` flag), the following environment variables are **required**:
|
||||
|
||||
- `AWS_ACCESS_KEY_ID`: Your S3 access key ID
|
||||
- `AWS_SECRET_ACCESS_KEY`: Your S3 secret access key
|
||||
|
||||
### S3 URL Format
|
||||
|
||||
S3 URLs should follow this format:
|
||||
```
|
||||
http://host:port/bucket # For MinIO or custom S3-compatible storage
|
||||
https://host/bucket # For HTTPS endpoints
|
||||
```
|
||||
|
||||
Examples:
|
||||
- `http://minio.example.com:9000/my-bucket`
|
||||
- `https://s3.amazonaws.com/my-bucket`
|
||||
- `http://localhost:9000/test-bucket`
|
||||
|
||||
### S3 Features
|
||||
|
||||
- **Hierarchical Processing**: Creates index.html files for each directory level in S3
|
||||
- **Path-Style URLs**: Uses path-style S3 URLs for MinIO compatibility
|
||||
- **Bucket Navigation**: Generates proper directory navigation within S3 buckets
|
||||
- **No Parent Directory at Root**: Root bucket index doesn't show parent (..) link
|
||||
|
||||
## Configuration
|
||||
|
||||
No configuration files are needed. All options are provided via command-line arguments.
|
||||
|
||||
### Environment Variables
|
||||
### Standard Environment Variables
|
||||
|
||||
The tool respects standard Go environment variables:
|
||||
- `GOOS` and `GOARCH` for cross-compilation
|
||||
@@ -240,25 +323,55 @@ This Go version provides the same functionality as the original Python script wi
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Permission Errors**
|
||||
**Local Directory Permission Errors**
|
||||
```bash
|
||||
# Ensure read permissions on target directory
|
||||
chmod +r /path/to/directory
|
||||
```
|
||||
|
||||
**Large Directories**
|
||||
**S3 Connection Issues**
|
||||
```bash
|
||||
# Use verbose mode to monitor progress
|
||||
s3-genindex -v /large/directory
|
||||
# Verify credentials are set
|
||||
echo $AWS_ACCESS_KEY_ID
|
||||
echo $AWS_SECRET_ACCESS_KEY
|
||||
|
||||
# Exclude unnecessary files
|
||||
s3-genindex -x "(\.git|node_modules|__pycache__)"
|
||||
# Test connection with dry run
|
||||
s3-genindex -s3 http://minio.example.com:9000/bucket -n -v
|
||||
|
||||
# Check S3 endpoint connectivity
|
||||
curl http://minio.example.com:9000/
|
||||
```
|
||||
|
||||
**Memory Usage**
|
||||
**S3 Permission Errors**
|
||||
```bash
|
||||
# Process directories individually for very large trees
|
||||
for dir in */; do s3-genindex "$dir"; done
|
||||
# Verify bucket access permissions
|
||||
aws s3 ls s3://your-bucket/ --endpoint-url http://minio.example.com:9000
|
||||
|
||||
# Check if bucket exists and is accessible
|
||||
s3-genindex -s3 http://minio.example.com:9000/bucket -v
|
||||
```
|
||||
|
||||
**Large Directory/Bucket Processing**
|
||||
```bash
|
||||
# Use verbose mode to monitor progress
|
||||
s3-genindex -d /large/directory -v
|
||||
s3-genindex -s3 http://minio:9000/large-bucket -v
|
||||
|
||||
# Exclude unnecessary files to reduce processing time
|
||||
s3-genindex -d /data -x "(\.git|node_modules|__pycache__|\.tmp)"
|
||||
s3-genindex -s3 http://minio:9000/bucket -x "(backup|temp|cache)"
|
||||
|
||||
# Use dry run to estimate processing time
|
||||
s3-genindex -s3 http://minio:9000/bucket -n
|
||||
```
|
||||
|
||||
**Network Timeout Issues (S3)**
|
||||
```bash
|
||||
# For slow connections, use verbose mode to see progress
|
||||
s3-genindex -s3 http://slow-endpoint:9000/bucket -v
|
||||
|
||||
# Test with smaller buckets first
|
||||
s3-genindex -s3 http://endpoint:9000/small-test-bucket -n
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
@@ -266,7 +379,27 @@ for dir in */; do s3-genindex "$dir"; done
|
||||
Enable verbose output to see detailed processing:
|
||||
|
||||
```bash
|
||||
s3-genindex -v /path/to/debug
|
||||
# Local directory debugging
|
||||
s3-genindex -d /path/to/debug -v
|
||||
|
||||
# S3 debugging with dry run
|
||||
s3-genindex -s3 http://minio:9000/bucket -n -v
|
||||
|
||||
# Full S3 processing with verbose output
|
||||
s3-genindex -s3 http://minio:9000/bucket -v
|
||||
```
|
||||
|
||||
### S3-Specific Debugging
|
||||
|
||||
```bash
|
||||
# Test S3 connectivity without processing
|
||||
curl -I http://minio.example.com:9000/bucket/
|
||||
|
||||
# List S3 objects directly (if aws-cli is available)
|
||||
aws s3 ls s3://bucket/ --endpoint-url http://minio.example.com:9000
|
||||
|
||||
# Verify S3 URL format
|
||||
s3-genindex -s3 http://wrong-format -n # Will show URL parsing errors
|
||||
```
|
||||
|
||||
## License
|
||||
@@ -283,8 +416,26 @@ Licensed under the Apache License 2.0. See original Python script for full licen
|
||||
|
||||
## Changelog
|
||||
|
||||
### v2.0.0 (Current)
|
||||
- **S3 Support**: Complete S3-compatible storage support (MinIO, AWS S3)
|
||||
- **Hierarchical S3 Processing**: Creates proper directory navigation for S3 buckets
|
||||
- **Dry Run Mode**: Preview functionality with `-n` flag
|
||||
- **Index File Control**: Show/hide index.html files with `-i` flag
|
||||
- **Mutual Exclusive Flags**: Clean separation between `-d` and `-s3` modes
|
||||
- **Enhanced Error Handling**: Better error messages and validation
|
||||
- **Comprehensive Testing**: Extended test suite covering S3 functionality
|
||||
- **URL Handling Fix**: Proper S3 navigation without URL encoding issues
|
||||
|
||||
### v1.0.0
|
||||
- Initial Go rewrite
|
||||
- Complete feature parity with Python version
|
||||
- Initial Go rewrite from Python genindex.py
|
||||
- Complete feature parity with Python version for local directories
|
||||
- Comprehensive test suite
|
||||
- Modern Go project structure
|
||||
- Recursive directory processing
|
||||
- File type detection and icons
|
||||
- Responsive HTML design with dark mode support
|
||||
|
||||
## Acknowledgement
|
||||
|
||||
This tool was inspired by
|
||||
[[index-html-generator](https://github.com/glowinthedark/index-html-generator)] on GitHub.
|
||||
|
||||
BIN
docs/screenshot.png
Normal file
BIN
docs/screenshot.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 82 KiB |
@@ -158,15 +158,17 @@ var ExtensionTypes = map[string]string{
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
TopDir string
|
||||
Filter string
|
||||
OutputFile string
|
||||
DirAppend bool
|
||||
Recursive bool
|
||||
IncludeHidden bool
|
||||
ExcludeRegex *regexp.Regexp
|
||||
Verbose bool
|
||||
DryRun bool
|
||||
TopDir string
|
||||
Filter string
|
||||
OutputFile string
|
||||
DirAppend bool
|
||||
Recursive bool
|
||||
IncludeHidden bool
|
||||
ExcludeRegex *regexp.Regexp
|
||||
Verbose bool
|
||||
DryRun bool
|
||||
ShowIndexFiles bool
|
||||
WatermarkURL string
|
||||
}
|
||||
|
||||
type FileEntry struct {
|
||||
@@ -210,15 +212,19 @@ func ProcessDir(topDir string, opts *Options) error {
|
||||
})
|
||||
|
||||
templateData := struct {
|
||||
DirName string
|
||||
Entries []FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
DirName string
|
||||
Entries []FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
IsRoot bool
|
||||
WatermarkURL string
|
||||
}{
|
||||
DirName: dirName,
|
||||
Entries: entries,
|
||||
DirAppend: opts.DirAppend,
|
||||
OutputFile: opts.OutputFile,
|
||||
DirName: dirName,
|
||||
Entries: entries,
|
||||
DirAppend: opts.DirAppend,
|
||||
OutputFile: opts.OutputFile,
|
||||
IsRoot: false, // Local filesystem always shows parent directory
|
||||
WatermarkURL: opts.WatermarkURL,
|
||||
}
|
||||
|
||||
if opts.DryRun {
|
||||
@@ -273,7 +279,7 @@ func ReadDirEntries(dirPath string, opts *Options) ([]FileEntry, error) {
|
||||
for _, file := range files {
|
||||
fileName := file.Name()
|
||||
|
||||
if strings.EqualFold(fileName, opts.OutputFile) {
|
||||
if !opts.ShowIndexFiles && strings.EqualFold(fileName, opts.OutputFile) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -455,6 +461,15 @@ const htmlTemplateString = `<!DOCTYPE html>
|
||||
padding-top: 25px;
|
||||
padding-bottom: 15px;
|
||||
background-color: #f2f2f2;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.watermark {
|
||||
height: 48px;
|
||||
width: auto;
|
||||
margin-right: 12px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
h1 {
|
||||
@@ -464,6 +479,8 @@ const htmlTemplateString = `<!DOCTYPE html>
|
||||
overflow-x: hidden;
|
||||
text-overflow: ellipsis;
|
||||
color: #999;
|
||||
margin: 0;
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
h1 a {
|
||||
@@ -940,6 +957,7 @@ const htmlTemplateString = `<!DOCTYPE html>
|
||||
</defs>
|
||||
</svg>
|
||||
<header>
|
||||
{{if .WatermarkURL}}<img src="{{.WatermarkURL}}" class="watermark" alt="Logo">{{end}}
|
||||
<h1>{{.DirName}}</h1>
|
||||
</header>
|
||||
<main>
|
||||
@@ -957,6 +975,7 @@ const htmlTemplateString = `<!DOCTYPE html>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{if not .IsRoot}}
|
||||
<tr class="clickable">
|
||||
<td></td>
|
||||
<td><a href="../{{if .DirAppend}}{{.OutputFile}}{{end}}">
|
||||
@@ -969,11 +988,12 @@ const htmlTemplateString = `<!DOCTYPE html>
|
||||
<td class="hideable">—</td>
|
||||
<td class="hideable"></td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{range .Entries}}
|
||||
<tr class="file">
|
||||
<td></td>
|
||||
<td>
|
||||
<a href="{{urlEscape .Path}}">
|
||||
<a href="{{.Path}}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><use xlink:href="#{{.IconType}}" class="{{.CSSClass}}"></use></svg>
|
||||
<span class="name">{{.Name}}</span>
|
||||
</a>
|
||||
|
||||
@@ -97,15 +97,19 @@ func TestHTMLTemplate(t *testing.T) {
|
||||
|
||||
// Test template execution with sample data
|
||||
data := struct {
|
||||
DirName string
|
||||
Entries []FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
DirName string
|
||||
Entries []FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
IsRoot bool
|
||||
WatermarkURL string
|
||||
}{
|
||||
DirName: "test-dir",
|
||||
Entries: []FileEntry{},
|
||||
DirAppend: false,
|
||||
OutputFile: "index.html",
|
||||
DirName: "test-dir",
|
||||
Entries: []FileEntry{},
|
||||
DirAppend: false,
|
||||
OutputFile: "index.html",
|
||||
IsRoot: false,
|
||||
WatermarkURL: "",
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
@@ -157,15 +161,19 @@ func TestHTMLTemplateWithEntries(t *testing.T) {
|
||||
}
|
||||
|
||||
data := struct {
|
||||
DirName string
|
||||
Entries []FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
DirName string
|
||||
Entries []FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
IsRoot bool
|
||||
WatermarkURL string
|
||||
}{
|
||||
DirName: "test-dir",
|
||||
Entries: entries,
|
||||
DirAppend: false,
|
||||
OutputFile: "index.html",
|
||||
DirName: "test-dir",
|
||||
Entries: entries,
|
||||
DirAppend: false,
|
||||
OutputFile: "index.html",
|
||||
IsRoot: false,
|
||||
WatermarkURL: "",
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
@@ -192,6 +200,62 @@ func TestHTMLTemplateWithEntries(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTMLTemplateWithWatermark(t *testing.T) {
|
||||
tmpl := GetHTMLTemplate()
|
||||
if tmpl == nil {
|
||||
t.Fatal("GetHTMLTemplate() returned nil")
|
||||
}
|
||||
|
||||
// Test template execution with watermark
|
||||
data := struct {
|
||||
DirName string
|
||||
Entries []FileEntry
|
||||
DirAppend bool
|
||||
OutputFile string
|
||||
IsRoot bool
|
||||
WatermarkURL string
|
||||
}{
|
||||
DirName: "test-dir",
|
||||
Entries: []FileEntry{},
|
||||
DirAppend: false,
|
||||
OutputFile: "index.html",
|
||||
IsRoot: false,
|
||||
WatermarkURL: "https://example.com/logo.svg",
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := tmpl.Execute(&buf, data)
|
||||
if err != nil {
|
||||
t.Fatalf("Template execution with watermark failed: %v", err)
|
||||
}
|
||||
|
||||
output := buf.String()
|
||||
|
||||
// Check that watermark image is included
|
||||
if !bytes.Contains([]byte(output), []byte(`src="https://example.com/logo.svg"`)) {
|
||||
t.Error("Template output should contain watermark image URL")
|
||||
}
|
||||
|
||||
if !bytes.Contains([]byte(output), []byte(`class="watermark"`)) {
|
||||
t.Error("Template output should contain watermark CSS class")
|
||||
}
|
||||
|
||||
// Test without watermark
|
||||
data.WatermarkURL = ""
|
||||
buf.Reset()
|
||||
err = tmpl.Execute(&buf, data)
|
||||
if err != nil {
|
||||
t.Fatalf("Template execution without watermark failed: %v", err)
|
||||
}
|
||||
|
||||
outputNoWatermark := buf.String()
|
||||
|
||||
// Check that watermark image is NOT included when URL is empty
|
||||
if bytes.Contains([]byte(outputNoWatermark), []byte(`class="watermark"`)) {
|
||||
t.Error("Template output should not contain watermark when URL is empty")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadDirEntries(t *testing.T) {
|
||||
// Create a temporary directory with test files
|
||||
tempDir := t.TempDir()
|
||||
|
||||
@@ -245,9 +245,9 @@ func TestProcessDirWithDirAppend(t *testing.T) {
|
||||
|
||||
htmlContent := string(content)
|
||||
|
||||
// Check that directory links include index.html (URL escaped)
|
||||
if !strings.Contains(htmlContent, "subdir%2Findex.html") {
|
||||
t.Errorf("Directory links should include index.html when DirAppend is true. Expected subdir%%2Findex.html in content")
|
||||
// Check that directory links include index.html
|
||||
if !strings.Contains(htmlContent, "subdir/index.html") {
|
||||
t.Errorf("Directory links should include index.html when DirAppend is true. Expected subdir/index.html in content")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user