Compare commits

...

5 Commits

Author SHA1 Message Date
Pim van Pelt
e2e65add2e Add tests 2025-08-02 20:03:32 +02:00
Pim van Pelt
5eb76736cc print help if there are no sub-commands given 2025-08-02 19:54:42 +02:00
Pim van Pelt
3000b0799b Add resume_monitor() API call. including an --all flag 2025-08-02 19:49:54 +02:00
Pim van Pelt
3b61b13eef Implement pause_monitor() 2025-08-02 19:46:09 +02:00
Pim van Pelt
e3c6dd5513 Add info command 2025-08-02 19:38:49 +02:00
16 changed files with 1523 additions and 6 deletions

View File

@@ -1,4 +1,4 @@
.PHONY: clean build install test help
.PHONY: clean build install test test-deps help
# Default target
help:
@@ -6,7 +6,8 @@ help:
@echo " clean - Remove build artifacts and cache files"
@echo " build - Build the wheel package"
@echo " install - Install the package in development mode"
@echo " test - Run tests (if available)"
@echo " test - Run the test suite"
@echo " test-deps - Install test dependencies"
@echo " help - Show this help message"
# Clean build artifacts
@@ -33,10 +34,15 @@ install:
@echo "Installing package in development mode..."
pip install -e .
# Test the package (placeholder for when tests are added)
# Install test dependencies
test-deps:
@echo "Installing test dependencies..."
pip install -e ".[test]"
# Test the package
test:
@echo "Running tests..."
@echo "No tests configured yet."
python3 run_tests.py
# Rebuild and reinstall (useful during development)
dev: clean build

14
pytest.ini Normal file
View File

@@ -0,0 +1,14 @@
[tool:pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
pythonpath = src
addopts =
-v
--tb=short
--strict-markers
--disable-warnings
filterwarnings =
ignore::DeprecationWarning
ignore::PendingDeprecationWarning

54
run_tests.py Executable file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
"""
Test runner script for kumacli
Usage:
python run_tests.py # Run all tests
python run_tests.py --cov # Run tests with coverage
python run_tests.py tests/test_info.py # Run specific test file
"""
import sys
import subprocess
def run_tests(args=None):
    """Run pytest in a subprocess and return its exit code.

    Args:
        args: Optional list of extra command-line arguments passed straight
            through to pytest. When omitted, the full ``tests/`` suite is
            run with verbose output and short tracebacks.

    Returns:
        int: 0 on success, pytest's non-zero exit code on failure, or 1 when
        no interpreter could be launched.
    """
    # Use the interpreter running this script instead of a hard-coded
    # "python3" so the active virtualenv (and Windows installs) are honoured.
    cmd = [sys.executable, "-m", "pytest"]
    if args:
        cmd.extend(args)
    else:
        cmd.extend(["tests/", "-v", "--tb=short"])
    try:
        result = subprocess.run(cmd, check=True)
        return result.returncode
    except subprocess.CalledProcessError as e:
        print(f"Tests failed with exit code: {e.returncode}")
        return e.returncode
    except FileNotFoundError:
        # Interpreter/pytest could not be launched at all.
        print("pytest not found. Install with: pip install pytest")
        return 1
def main():
    """Entry point: forward any CLI arguments to run_tests and exit with its code."""
    # Everything after the script name is handed to pytest untouched;
    # with no arguments, run_tests falls back to its default suite.
    cli_args = sys.argv[1:] or None
    sys.exit(run_tests(cli_args))
if __name__ == "__main__":
main()

View File

@@ -34,6 +34,16 @@ setup(
install_requires=[
"uptime-kuma-api>=1.0.0",
],
extras_require={
"dev": [
"pytest>=6.0",
"pytest-cov>=2.0",
],
"test": [
"pytest>=6.0",
"pytest-cov>=2.0",
],
},
entry_points={
"console_scripts": [
"kumacli=kumacli.kumacli:main",

36
src/kumacli/cmd/info.py Normal file
View File

@@ -0,0 +1,36 @@
#!/usr/bin/env python3
from ..client import KumaClient
class InfoCommands:
    """Commands that expose Uptime Kuma server information."""

    def __init__(self, client: KumaClient):
        # Connected KumaClient; its .api attribute wraps the Kuma socket API.
        self.client = client

    def get_info(self):
        """Fetch server info from the API and print it as key/value pairs.

        Errors are reported to stdout and never propagate to the caller.
        """
        try:
            info = self.client.api.info()
            if not info:
                print("No server info available")
                return
            print("Server Information:")
            for field, val in info.items():
                print(f"  {field}: {val}")
        except Exception as exc:
            print(f"Error getting server info: {exc}")
def setup_info_parser(subparsers):
    """Register the 'info' sub-command and return its parser."""
    return subparsers.add_parser("info", help="Get server information")
def handle_info_command(args, client):
    """Handle the 'info' command: print server information.

    Always returns True; any API errors are reported by InfoCommands itself.
    """
    InfoCommands(client).get_info()
    return True

View File

@@ -209,6 +209,8 @@ def setup_maintenance_parser(subparsers):
maintenance_parser = subparsers.add_parser(
"maintenance", help="Maintenance operations"
)
# Store reference to parser for help display
setup_maintenance_parser._parser = maintenance_parser
maintenance_subparsers = maintenance_parser.add_subparsers(
dest="maintenance_action", help="Maintenance actions"
)
@@ -263,7 +265,15 @@ def handle_maintenance_command(args, client):
"""Handle maintenance command execution"""
maintenance_commands = MaintenanceCommands(client)
if args.maintenance_action == "list":
if not args.maintenance_action:
if hasattr(setup_maintenance_parser, "_parser"):
setup_maintenance_parser._parser.print_help()
else:
print(
"Error: No maintenance action specified. Use --help for usage information."
)
return False
elif args.maintenance_action == "list":
maintenance_commands.list_maintenances()
elif args.maintenance_action == "add":
title = (

View File

@@ -67,10 +67,159 @@ class MonitorCommands:
except Exception as e:
print(f"Error listing monitors: {e}")
def pause_monitors(self, monitor_patterns=None, group_patterns=None):
    """Pause monitors selected by name patterns and/or group patterns.

    Progress and errors are printed to stdout; nothing propagates to the
    caller.
    """
    try:
        # At least one selector is mandatory.
        if not monitor_patterns and not group_patterns:
            print(
                "Error: Either --monitor or --group flag is required. Specify at least one pattern."
            )
            return

        # Collect candidates from both selectors.
        candidates = []
        if monitor_patterns:
            candidates += self.client.find_monitors_by_pattern(monitor_patterns)
        if group_patterns:
            # Normalise group members to the same {"id", "name"} shape.
            candidates += [
                {"id": m.get("id"), "name": m.get("name")}
                for m in self.client.get_monitors_in_groups(group_patterns)
            ]

        # Deduplicate by monitor id, keeping first-seen order.
        seen_ids = set()
        targets = []
        for mon in candidates:
            if mon["id"] not in seen_ids:
                seen_ids.add(mon["id"])
                targets.append(mon)

        if not targets:
            print(
                "Error: No monitors found matching the specified patterns or groups"
            )
            return

        print(f"Found {len(targets)} matching monitors to pause:")
        for mon in targets:
            print(f"  - {mon['name']} (ID: {mon['id']})")

        # Pause each one individually so a single failure doesn't stop the rest.
        paused = 0
        for mon in targets:
            try:
                self.client.api.pause_monitor(mon["id"])
                print(f"Paused monitor '{mon['name']}' (ID: {mon['id']})")
                paused += 1
            except Exception as e:
                print(f"Failed to pause monitor '{mon['name']}': {e}")

        print(
            f"Successfully paused {paused} out of {len(targets)} monitors"
        )
    except Exception as e:
        print(f"Error pausing monitors: {e}")
def resume_monitors(
    self, monitor_patterns=None, group_patterns=None, resume_all=False
):
    """Resume paused monitors.

    Args:
        monitor_patterns: Optional list of monitor-name wildcard patterns.
        group_patterns: Optional list of group-name wildcard patterns; every
            monitor inside a matching group is selected.
        resume_all: When True, ignore the pattern arguments and resume every
            monitor whose 'active' flag is false.

    Progress and errors are printed to stdout; nothing propagates to the
    caller.
    """
    try:
        if not monitor_patterns and not group_patterns and not resume_all:
            print("Error: Either --monitor, --group, or --all flag is required.")
            return

        matched_monitors = []
        if resume_all:
            # active == False means paused; default to True so records
            # missing the key are treated as running and skipped.
            matched_monitors = [
                {"id": m.get("id"), "name": m.get("name")}
                for m in self.client.api.get_monitors()
                if not m.get("active", True)
            ]
        else:
            if monitor_patterns:
                matched_monitors.extend(
                    self.client.find_monitors_by_pattern(monitor_patterns)
                )
            if group_patterns:
                # Normalise group members to the {"id", "name"} shape used
                # by find_monitors_by_pattern.
                matched_monitors.extend(
                    {"id": m.get("id"), "name": m.get("name")}
                    for m in self.client.get_monitors_in_groups(group_patterns)
                )

        # Remove duplicates while preserving first-seen order.
        seen = set()
        unique_monitors = []
        for monitor in matched_monitors:
            if monitor["id"] not in seen:
                seen.add(monitor["id"])
                unique_monitors.append(monitor)
        matched_monitors = unique_monitors

        if not matched_monitors:
            if resume_all:
                print("No paused monitors found to resume")
            else:
                print(
                    "Error: No monitors found matching the specified patterns or groups"
                )
            return

        if resume_all:
            print(f"Found {len(matched_monitors)} paused monitors to resume:")
        else:
            print(f"Found {len(matched_monitors)} matching monitors to resume:")
        for monitor in matched_monitors:
            print(f"  - {monitor['name']} (ID: {monitor['id']})")

        # Resume each monitor individually so one failure doesn't stop the rest.
        resumed_count = 0
        for monitor in matched_monitors:
            try:
                self.client.api.resume_monitor(monitor["id"])
                print(f"Resumed monitor '{monitor['name']}' (ID: {monitor['id']})")
                resumed_count += 1
            except Exception as e:
                print(f"Failed to resume monitor '{monitor['name']}': {e}")

        print(
            f"Successfully resumed {resumed_count} out of {len(matched_monitors)} monitors"
        )
    except Exception as e:
        print(f"Error resuming monitors: {e}")
def setup_monitor_parser(subparsers):
"""Setup monitor command parser"""
monitor_parser = subparsers.add_parser("monitor", help="Monitor operations")
# Store reference to parser for help display
setup_monitor_parser._parser = monitor_parser
monitor_subparsers = monitor_parser.add_subparsers(
dest="monitor_action", help="Monitor actions"
)
@@ -90,6 +239,39 @@ def setup_monitor_parser(subparsers):
help="Group name pattern to filter by (supports wildcards, can be repeated)",
)
# Pause monitors command
pause_monitors_parser = monitor_subparsers.add_parser(
"pause", help="Pause monitors"
)
pause_monitors_parser.add_argument(
"--monitor",
action="append",
help="Monitor name pattern to pause (supports wildcards, can be repeated)",
)
pause_monitors_parser.add_argument(
"--group",
action="append",
help="Group name pattern to pause all group members (supports wildcards, can be repeated)",
)
# Resume monitors command
resume_monitors_parser = monitor_subparsers.add_parser(
"resume", help="Resume monitors"
)
resume_monitors_parser.add_argument(
"--monitor",
action="append",
help="Monitor name pattern to resume (supports wildcards, can be repeated)",
)
resume_monitors_parser.add_argument(
"--group",
action="append",
help="Group name pattern to resume all group members (supports wildcards, can be repeated)",
)
resume_monitors_parser.add_argument(
"--all", action="store_true", help="Resume all paused monitors"
)
return monitor_parser
@@ -97,10 +279,28 @@ def handle_monitor_command(args, client):
"""Handle monitor command execution"""
monitor_commands = MonitorCommands(client)
if args.monitor_action == "list":
if not args.monitor_action:
if hasattr(setup_monitor_parser, "_parser"):
setup_monitor_parser._parser.print_help()
else:
print(
"Error: No monitor action specified. Use --help for usage information."
)
return False
elif args.monitor_action == "list":
monitor_commands.list_monitors(
monitor_patterns=args.monitor, group_patterns=args.group
)
elif args.monitor_action == "pause":
monitor_commands.pause_monitors(
monitor_patterns=args.monitor, group_patterns=args.group
)
elif args.monitor_action == "resume":
monitor_commands.resume_monitors(
monitor_patterns=args.monitor,
group_patterns=args.group,
resume_all=args.all,
)
else:
print("Unknown monitor action. Use --help for usage information.")
return False

View File

@@ -10,6 +10,7 @@ try:
from .client import KumaClient
from .cmd.monitor import setup_monitor_parser, handle_monitor_command
from .cmd.maintenance import setup_maintenance_parser, handle_maintenance_command
from .cmd.info import setup_info_parser, handle_info_command
except ImportError:
# Running directly, add parent directory to path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -19,6 +20,7 @@ except ImportError:
setup_maintenance_parser,
handle_maintenance_command,
)
from kumacli.cmd.info import setup_info_parser, handle_info_command
def main():
@@ -40,6 +42,7 @@ def main():
# Setup command parsers
monitor_parser = setup_monitor_parser(subparsers)
maintenance_parser = setup_maintenance_parser(subparsers)
info_parser = setup_info_parser(subparsers)
args = parser.parse_args()
@@ -69,6 +72,8 @@ def main():
success = handle_monitor_command(args, client)
elif args.resource == "maintenance":
success = handle_maintenance_command(args, client)
elif args.resource == "info":
success = handle_info_command(args, client)
else:
parser.print_help()
sys.exit(1)

100
tests/README.md Normal file
View File

@@ -0,0 +1,100 @@
# KumaCLI Tests
This directory contains the test suite for kumacli.
## Running Tests
### Prerequisites
Install test dependencies:
```bash
pip install -e ".[test]"
# or
pip install pytest pytest-cov
```
### Run All Tests
```bash
# Using pytest directly
python3 -m pytest
# Using the test runner script
python3 run_tests.py
# From the project root
python3 -m pytest tests/
```
### Run Specific Tests
```bash
# Test a specific file
pytest tests/test_info.py
# Test a specific class
pytest tests/test_monitor.py::TestMonitorCommands
# Test a specific method
pytest tests/test_monitor.py::TestMonitorCommands::test_pause_monitors_by_pattern
```
### Run Tests with Coverage
```bash
pytest --cov=kumacli --cov-report=html
python3 run_tests.py --cov
```
### Test Options
```bash
# Verbose output
pytest -v
# Stop on first failure
pytest -x
# Run tests in parallel (requires pytest-xdist)
pytest -n auto
```
## Test Structure
- `conftest.py` - Shared fixtures and test configuration
- `test_info.py` - Tests for the info command
- `test_monitor.py` - Tests for monitor commands (list, pause, resume)
- `test_maintenance.py` - Tests for maintenance commands
- `test_client.py` - Tests for the KumaClient class
- `test_cli_integration.py` - Integration tests for CLI functionality
## Test Coverage
The tests cover:
- ✅ Command argument parsing
- ✅ API method calls and responses
- ✅ Error handling and edge cases
- ✅ Help message functionality
- ✅ Monitor pause/resume operations
- ✅ Maintenance operations
- ✅ Client utility functions
- ✅ Integration between components
## Mock Strategy
Tests use unittest.mock to:
- Mock the UptimeKumaApi calls
- Simulate API responses and errors
- Test command logic without requiring a live server
- Verify correct API method calls with expected parameters
## Adding New Tests
When adding new functionality:
1. Add unit tests for the new commands/methods
2. Add integration tests if the feature involves multiple components
3. Test both success and error cases
4. Mock external dependencies (API calls, file operations)
5. Use descriptive test names that explain what is being tested

1
tests/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Test package for kumacli

77
tests/conftest.py Normal file
View File

@@ -0,0 +1,77 @@
#!/usr/bin/env python3
import sys
import os
# Add the src directory to Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
import pytest
from unittest.mock import Mock, MagicMock
from kumacli.client import KumaClient
@pytest.fixture
def mock_client():
    """A KumaClient stand-in whose entire API surface is mocked."""
    # spec=KumaClient keeps attribute access honest; .api is replaced so
    # tests can set return values / side effects on API calls directly.
    fake = Mock(spec=KumaClient)
    fake.api = Mock()
    return fake
@pytest.fixture
def mock_monitors():
    """Sample monitor data for testing"""
    # NOTE: several tests rely on exactly this mix — monitors 2 and 4 are
    # paused (active=False), monitor 3 is a "group" type, and monitor 4 is
    # its child (parent=3).
    return [
        {
            "id": 1,
            "name": "Test Monitor 1",
            "type": "http",
            "url": "https://example.com",
            "active": True,
            "parent": None
        },
        {
            "id": 2,
            "name": "Test Monitor 2",
            "type": "http",
            "url": "https://test.com",
            "active": False,
            "parent": None
        },
        {
            "id": 3,
            "name": "Group Monitor",
            "type": "group",
            "active": True,
            "parent": None
        },
        {
            "id": 4,
            "name": "Child Monitor",
            "type": "http",
            "url": "https://child.com",
            "active": False,
            "parent": 3
        }
    ]
@pytest.fixture
def mock_maintenances():
    """Sample maintenance data for testing"""
    # One active and one inactive maintenance so listing tests can assert
    # both status renderings.
    return [
        {
            "id": 1,
            "title": "Test Maintenance",
            "description": "Test maintenance description",
            "strategy": "single",
            "active": True
        },
        {
            "id": 2,
            "title": "Inactive Maintenance",
            "description": "Inactive maintenance description",
            "strategy": "single",
            "active": False
        }
    ]

View File

@@ -0,0 +1,254 @@
#!/usr/bin/env python3
import pytest
from unittest.mock import Mock, patch, MagicMock
import argparse
from io import StringIO
import sys
from kumacli.cmd.monitor import setup_monitor_parser, handle_monitor_command
from kumacli.cmd.maintenance import setup_maintenance_parser, handle_maintenance_command
from kumacli.cmd.info import setup_info_parser, handle_info_command
class TestCLIIntegration:
    """Integration tests wiring parser setup and command handlers together.

    Uses the mock_client / mock_monitors fixtures from conftest.py; no live
    Uptime Kuma server is required.
    """

    def test_monitor_parser_setup(self):
        """Test monitor parser setup"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        monitor_parser = setup_monitor_parser(subparsers)
        # Verify parser is created
        assert monitor_parser is not None
        assert hasattr(setup_monitor_parser, '_parser')

    def test_maintenance_parser_setup(self):
        """Test maintenance parser setup"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        maintenance_parser = setup_maintenance_parser(subparsers)
        # Verify parser is created
        assert maintenance_parser is not None
        assert hasattr(setup_maintenance_parser, '_parser')

    def test_info_parser_setup(self):
        """Test info parser setup"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        info_parser = setup_info_parser(subparsers)
        # Verify parser is created
        assert info_parser is not None

    def test_monitor_help_message(self, mock_client, capsys):
        """Test monitor command shows help when no action specified"""
        # Setup
        mock_args = Mock()
        mock_args.monitor_action = None
        # Setup parser reference
        # NOTE(review): mutates the module-level _parser attribute; tests
        # running after this one see the mock parser.
        mock_parser = Mock()
        setup_monitor_parser._parser = mock_parser
        # Execute
        result = handle_monitor_command(mock_args, mock_client)
        # Verify
        assert result is False
        mock_parser.print_help.assert_called_once()

    def test_maintenance_help_message(self, mock_client, capsys):
        """Test maintenance command shows help when no action specified"""
        # Setup
        mock_args = Mock()
        mock_args.maintenance_action = None
        # Setup parser reference
        mock_parser = Mock()
        setup_maintenance_parser._parser = mock_parser
        # Execute
        result = handle_maintenance_command(mock_args, mock_client)
        # Verify
        assert result is False
        mock_parser.print_help.assert_called_once()

    def test_monitor_command_with_full_args(self, mock_client):
        """Test monitor command with complete argument structure"""
        # Setup
        mock_args = Mock()
        mock_args.monitor_action = "pause"
        mock_args.monitor = ["test*"]
        mock_args.group = ["web-services"]
        # Mock client methods
        mock_client.find_monitors_by_pattern.return_value = [
            {"id": 1, "name": "test-monitor"}
        ]
        mock_client.get_monitors_in_groups.return_value = [
            {"id": 2, "name": "web-service-monitor"}
        ]
        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}
        # Execute
        result = handle_monitor_command(mock_args, mock_client)
        # Verify
        assert result is True
        mock_client.find_monitors_by_pattern.assert_called_once_with(["test*"])
        mock_client.get_monitors_in_groups.assert_called_once_with(["web-services"])
        # Should pause both monitors (deduplicated)
        assert mock_client.api.pause_monitor.call_count == 2

    def test_resume_all_monitors_integration(self, mock_client, mock_monitors):
        """Test resume all monitors integration"""
        # Setup
        mock_args = Mock()
        mock_args.monitor_action = "resume"
        mock_args.monitor = None
        mock_args.group = None
        mock_args.all = True
        mock_client.api.get_monitors.return_value = mock_monitors
        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}
        # Execute
        result = handle_monitor_command(mock_args, mock_client)
        # Verify
        assert result is True
        mock_client.api.get_monitors.assert_called_once()
        # Should resume only paused monitors (ID 2 and 4 from mock_monitors)
        assert mock_client.api.resume_monitor.call_count == 2
        mock_client.api.resume_monitor.assert_any_call(2)
        mock_client.api.resume_monitor.assert_any_call(4)
class TestArgumentParsing:
    """Pure argparse round-trips: each sub-command's flags parse as expected."""

    def test_monitor_list_arguments(self):
        """Test monitor list command argument parsing"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        setup_monitor_parser(subparsers)
        # Test with monitor patterns (repeatable --monitor appends)
        args = parser.parse_args(["monitor", "list", "--monitor", "web*", "--monitor", "api*"])
        assert args.resource == "monitor"
        assert args.monitor_action == "list"
        assert args.monitor == ["web*", "api*"]

    def test_monitor_pause_arguments(self):
        """Test monitor pause command argument parsing"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        setup_monitor_parser(subparsers)
        # Test with group patterns
        args = parser.parse_args(["monitor", "pause", "--group", "production"])
        assert args.resource == "monitor"
        assert args.monitor_action == "pause"
        assert args.group == ["production"]

    def test_monitor_resume_all_arguments(self):
        """Test monitor resume all command argument parsing"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        setup_monitor_parser(subparsers)
        # Test with --all flag
        args = parser.parse_args(["monitor", "resume", "--all"])
        assert args.resource == "monitor"
        assert args.monitor_action == "resume"
        assert args.all is True

    def test_maintenance_add_arguments(self):
        """Test maintenance add command argument parsing"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        setup_maintenance_parser(subparsers)
        # Test maintenance add
        args = parser.parse_args([
            "maintenance", "add",
            "--title", "Server Update",
            "--description", "Updating server software",
            "--duration", "2h",
            "--monitor", "server*"
        ])
        assert args.resource == "maintenance"
        assert args.maintenance_action == "add"
        assert args.title == "Server Update"
        assert args.description == "Updating server software"
        assert args.duration == "2h"
        assert args.monitor == ["server*"]

    def test_maintenance_delete_arguments(self):
        """Test maintenance delete command argument parsing"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        setup_maintenance_parser(subparsers)
        # Test delete by ID (--id is converted to int by the parser)
        args = parser.parse_args(["maintenance", "delete", "--id", "123"])
        assert args.resource == "maintenance"
        assert args.maintenance_action == "delete"
        assert args.id == 123
        # Test delete all
        args = parser.parse_args(["maintenance", "delete", "--all"])
        assert args.resource == "maintenance"
        assert args.maintenance_action == "delete"
        assert args.all is True

    def test_info_arguments(self):
        """Test info command argument parsing"""
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(dest="resource")
        setup_info_parser(subparsers)
        # Test info command
        args = parser.parse_args(["info"])
        assert args.resource == "info"
class TestErrorHandling:
    """Error-path behaviour: handlers report problems but do not raise."""

    def test_monitor_command_resilience(self, mock_client, capsys):
        """Test monitor command handles various error conditions"""
        # Setup
        mock_args = Mock()
        mock_args.monitor_action = "pause"
        mock_args.monitor = ["nonexistent*"]
        mock_args.group = None
        # Mock no matches found
        mock_client.find_monitors_by_pattern.return_value = []
        # Execute
        result = handle_monitor_command(mock_args, mock_client)
        # Verify error handling
        assert result is True  # Command completes even with no matches
        captured = capsys.readouterr()
        assert "Error: No monitors found matching the specified patterns or groups" in captured.out

    def test_maintenance_command_resilience(self, mock_client, capsys):
        """Test maintenance command handles API errors"""
        # Setup
        mock_args = Mock()
        mock_args.maintenance_action = "list"
        # Mock API error
        mock_client.api.get_maintenances.side_effect = Exception("Connection timeout")
        # Execute
        result = handle_maintenance_command(mock_args, mock_client)
        # Verify error handling
        assert result is True  # Command completes even with error
        captured = capsys.readouterr()
        assert "Error listing maintenances: Connection timeout" in captured.out

226
tests/test_client.py Normal file
View File

@@ -0,0 +1,226 @@
#!/usr/bin/env python3
import pytest
from unittest.mock import Mock, patch
from datetime import datetime, timedelta
from kumacli.client import KumaClient
class TestKumaClient:
    """Unit tests for KumaClient: duration/time parsing, pattern matching,
    and connect/disconnect lifecycle (API fully mocked)."""

    def test_parse_duration_minutes(self):
        """Test parsing duration in minutes"""
        client = KumaClient("http://test.com")
        assert client.parse_duration("90m") == 5400  # 90 * 60
        assert client.parse_duration("1m") == 60
        assert client.parse_duration("120m") == 7200

    def test_parse_duration_hours(self):
        """Test parsing duration in hours"""
        client = KumaClient("http://test.com")
        assert client.parse_duration("1h") == 3600  # 1 * 3600
        assert client.parse_duration("2h") == 7200
        assert client.parse_duration("24h") == 86400

    def test_parse_duration_seconds(self):
        """Test parsing duration in seconds"""
        client = KumaClient("http://test.com")
        assert client.parse_duration("3600s") == 3600
        assert client.parse_duration("60s") == 60
        assert client.parse_duration("1s") == 1

    def test_parse_duration_default(self):
        """Test parsing duration with default value"""
        client = KumaClient("http://test.com")
        assert client.parse_duration(None) == 5400  # Default 90 minutes
        assert client.parse_duration("") == 5400

    def test_parse_duration_invalid(self):
        """Test parsing invalid duration format"""
        # A bare number without a unit suffix is rejected as well.
        client = KumaClient("http://test.com")
        with pytest.raises(ValueError, match="Invalid duration format"):
            client.parse_duration("invalid")
        with pytest.raises(ValueError, match="Invalid duration format"):
            client.parse_duration("90x")
        with pytest.raises(ValueError, match="Invalid duration format"):
            client.parse_duration("90")

    def test_parse_start_time_none(self):
        """Test parsing start time with None (current time)"""
        client = KumaClient("http://test.com")
        before = datetime.utcnow()
        result = client.parse_start_time(None)
        after = datetime.utcnow()
        assert before <= result <= after

    def test_parse_start_time_iso_format(self):
        """Test parsing ISO format start time"""
        client = KumaClient("http://test.com")
        # Test ISO format with Z
        result = client.parse_start_time("2023-12-25T10:30:00Z")
        expected = datetime(2023, 12, 25, 10, 30, 0)
        assert result == expected
        # Test ISO format with timezone
        result = client.parse_start_time("2023-12-25T10:30:00+00:00")
        assert result == expected

    def test_parse_start_time_common_formats(self):
        """Test parsing common date/time formats"""
        client = KumaClient("http://test.com")
        # Full datetime
        result = client.parse_start_time("2023-12-25 10:30:00")
        expected = datetime(2023, 12, 25, 10, 30, 0)
        assert result == expected
        # Date and hour:minute
        result = client.parse_start_time("2023-12-25 10:30")
        expected = datetime(2023, 12, 25, 10, 30, 0)
        assert result == expected
        # Date only
        result = client.parse_start_time("2023-12-25")
        expected = datetime(2023, 12, 25, 0, 0, 0)
        assert result == expected

    def test_parse_start_time_invalid(self):
        """Test parsing invalid start time format"""
        client = KumaClient("http://test.com")
        with pytest.raises(ValueError, match="Invalid start time format"):
            client.parse_start_time("invalid-date")
        with pytest.raises(ValueError, match="Invalid start time format"):
            client.parse_start_time("2023-13-45")

    def test_find_monitors_by_pattern_success(self):
        """Test finding monitors by pattern successfully"""
        client = KumaClient("http://test.com")
        client.api = Mock()
        mock_monitors = [
            {"id": 1, "name": "Web Server"},
            {"id": 2, "name": "API Server"},
            {"id": 3, "name": "Database"},
            {"id": 4, "name": "Web Frontend"}
        ]
        client.api.get_monitors.return_value = mock_monitors
        # Test exact match
        result = client.find_monitors_by_pattern(["Web Server"])
        assert len(result) == 1
        assert result[0]["name"] == "Web Server"
        # Test wildcard pattern
        result = client.find_monitors_by_pattern(["Web*"])
        assert len(result) == 2
        names = [m["name"] for m in result]
        assert "Web Server" in names
        assert "Web Frontend" in names

    def test_find_monitors_by_pattern_case_insensitive(self):
        """Test finding monitors by pattern is case insensitive"""
        client = KumaClient("http://test.com")
        client.api = Mock()
        mock_monitors = [
            {"id": 1, "name": "Web Server"},
            {"id": 2, "name": "API Server"}
        ]
        client.api.get_monitors.return_value = mock_monitors
        # Test case insensitive matching
        result = client.find_monitors_by_pattern(["web*"])
        assert len(result) == 1
        assert result[0]["name"] == "Web Server"

    def test_find_monitors_by_pattern_no_matches(self):
        """Test finding monitors with no matches"""
        client = KumaClient("http://test.com")
        client.api = Mock()
        mock_monitors = [
            {"id": 1, "name": "Web Server"}
        ]
        client.api.get_monitors.return_value = mock_monitors
        result = client.find_monitors_by_pattern(["Database*"])
        assert len(result) == 0

    def test_find_monitors_by_pattern_duplicates(self):
        """Test finding monitors removes duplicates"""
        client = KumaClient("http://test.com")
        client.api = Mock()
        mock_monitors = [
            {"id": 1, "name": "Web Server"}
        ]
        client.api.get_monitors.return_value = mock_monitors
        # Same monitor should match both patterns
        result = client.find_monitors_by_pattern(["Web*", "*Server"])
        assert len(result) == 1
        assert result[0]["name"] == "Web Server"

    def test_find_monitors_by_pattern_api_error(self, capsys):
        """Test finding monitors handles API errors"""
        # Errors are swallowed and reported on stdout; an empty list returns.
        client = KumaClient("http://test.com")
        client.api = Mock()
        client.api.get_monitors.side_effect = Exception("API Error")
        result = client.find_monitors_by_pattern(["Web*"])
        assert len(result) == 0
        captured = capsys.readouterr()
        assert "Error finding monitors: API Error" in captured.out

    @patch('kumacli.client.UptimeKumaApi')
    def test_connect_success(self, mock_api_class, capsys):
        """Test successful connection"""
        mock_api = Mock()
        mock_api_class.return_value = mock_api
        mock_api.login.return_value = True
        client = KumaClient("http://test.com", "user", "pass")
        result = client.connect()
        assert result is True
        assert client.api is mock_api
        mock_api_class.assert_called_once_with("http://test.com")
        mock_api.login.assert_called_once_with("user", "pass")
        captured = capsys.readouterr()
        assert "Connected to http://test.com" in captured.out

    @patch('kumacli.client.UptimeKumaApi')
    def test_connect_failure(self, mock_api_class, capsys):
        """Test connection failure"""
        mock_api_class.side_effect = Exception("Connection failed")
        client = KumaClient("http://test.com", "user", "pass")
        result = client.connect()
        assert result is False
        captured = capsys.readouterr()
        assert "Failed to connect: Connection failed" in captured.out

    def test_disconnect(self):
        """Test disconnection"""
        client = KumaClient("http://test.com")
        client.api = Mock()
        client.disconnect()
        client.api.disconnect.assert_called_once()

92
tests/test_info.py Normal file
View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python3
import pytest
from unittest.mock import Mock, patch
from io import StringIO
import sys
from kumacli.cmd.info import InfoCommands, handle_info_command
class TestInfoCommands:
    """Unit tests for InfoCommands.get_info output and error handling."""

    def test_get_info_success(self, mock_client, capsys):
        """Test successful info retrieval"""
        # Setup
        mock_info_data = {
            "version": "1.23.0",
            "hostname": "kuma-server",
            "primaryBaseURL": "https://status.example.com"
        }
        mock_client.api.info.return_value = mock_info_data
        info_commands = InfoCommands(mock_client)
        # Execute
        info_commands.get_info()
        # Verify
        mock_client.api.info.assert_called_once()
        captured = capsys.readouterr()
        assert "Server Information:" in captured.out
        assert "version: 1.23.0" in captured.out
        assert "hostname: kuma-server" in captured.out
        assert "primaryBaseURL: https://status.example.com" in captured.out

    def test_get_info_empty_response(self, mock_client, capsys):
        """Test info command with empty response"""
        # Setup
        mock_client.api.info.return_value = None
        info_commands = InfoCommands(mock_client)
        # Execute
        info_commands.get_info()
        # Verify
        mock_client.api.info.assert_called_once()
        captured = capsys.readouterr()
        assert "No server info available" in captured.out

    def test_get_info_api_error(self, mock_client, capsys):
        """Test info command with API error"""
        # Setup: get_info must swallow the exception and print it instead.
        mock_client.api.info.side_effect = Exception("Connection failed")
        info_commands = InfoCommands(mock_client)
        # Execute
        info_commands.get_info()
        # Verify
        mock_client.api.info.assert_called_once()
        captured = capsys.readouterr()
        assert "Error getting server info: Connection failed" in captured.out
class TestInfoCommandHandler:
    """Unit tests for handle_info_command()."""

    def test_handle_info_command(self, mock_client):
        """The handler fetches server info and signals the command was handled."""
        mock_client.api.info.return_value = {"version": "1.23.0"}

        handled = handle_info_command(Mock(), mock_client)

        assert handled is True
        mock_client.api.info.assert_called_once()

    def test_handle_info_command_with_error(self, mock_client):
        """Even when the API fails, the handler still reports handled."""
        mock_client.api.info.side_effect = Exception("API Error")

        handled = handle_info_command(Mock(), mock_client)

        assert handled is True  # Handler always returns True
        mock_client.api.info.assert_called_once()

168
tests/test_maintenance.py Normal file
View File

@@ -0,0 +1,168 @@
#!/usr/bin/env python3
import pytest
from unittest.mock import Mock, patch
from io import StringIO
import sys
from kumacli.cmd.maintenance import MaintenanceCommands, handle_maintenance_command
class TestMaintenanceCommands:
    """Unit tests for MaintenanceCommands list/delete operations."""

    def test_list_maintenances_success(self, mock_client, mock_maintenances, capsys):
        """Both active and inactive maintenances appear in the listing."""
        mock_client.api.get_maintenances.return_value = mock_maintenances

        MaintenanceCommands(mock_client).list_maintenances()

        mock_client.api.get_maintenances.assert_called_once()
        out = capsys.readouterr().out
        assert "Test Maintenance" in out
        assert "Inactive Maintenance" in out
        assert "Active" in out
        assert "Inactive" in out

    def test_list_maintenances_empty(self, mock_client, capsys):
        """An empty maintenance list prints the 'none found' message."""
        mock_client.api.get_maintenances.return_value = []

        MaintenanceCommands(mock_client).list_maintenances()

        assert "No maintenances found" in capsys.readouterr().out

    def test_list_maintenances_api_error(self, mock_client, capsys):
        """API failures are reported instead of propagating."""
        mock_client.api.get_maintenances.side_effect = Exception("API Error")

        MaintenanceCommands(mock_client).list_maintenances()

        assert "Error listing maintenances: API Error" in capsys.readouterr().out

    def test_delete_maintenance_by_id(self, mock_client, capsys):
        """Deleting by ID looks up the record, then removes it exactly once."""
        mock_client.api.get_maintenance.return_value = {"id": 1, "title": "Test Maintenance"}
        mock_client.api.delete_maintenance.return_value = {"msg": "Deleted Successfully"}

        MaintenanceCommands(mock_client).delete_maintenance(maintenance_id=1)

        mock_client.api.get_maintenance.assert_called_once_with(1)
        mock_client.api.delete_maintenance.assert_called_once_with(1)
        assert "Successfully deleted maintenance 'Test Maintenance' (ID: 1)" in capsys.readouterr().out

    def test_delete_all_maintenances(self, mock_client, mock_maintenances, capsys):
        """The --all path deletes every maintenance returned by the API."""
        mock_client.api.get_maintenances.return_value = mock_maintenances
        mock_client.api.delete_maintenance.return_value = {"msg": "Deleted Successfully"}

        MaintenanceCommands(mock_client).delete_maintenance(delete_all=True)

        assert mock_client.api.delete_maintenance.call_count == 2
        out = capsys.readouterr().out
        assert "Found 2 maintenances to delete:" in out
        assert "Successfully deleted 2 out of 2 maintenances" in out

    def test_delete_maintenance_no_params(self, mock_client, capsys):
        """Calling delete with neither --id nor --all is rejected."""
        MaintenanceCommands(mock_client).delete_maintenance()

        assert "Error: Either --id or --all flag is required for delete operation" in capsys.readouterr().out
class TestMaintenanceCommandHandler:
    """Unit tests for handle_maintenance_command() dispatch."""

    def test_handle_maintenance_command_no_action(self, mock_client, capsys):
        """A missing sub-action is not handled."""
        args = Mock()
        args.maintenance_action = None

        # Patch the parser setup so the handler's help path has a parser to use.
        with patch('kumacli.cmd.maintenance.setup_maintenance_parser') as mock_setup:
            mock_setup._parser = Mock()
            handled = handle_maintenance_command(args, mock_client)

        assert handled is False

    def test_handle_maintenance_command_list(self, mock_client, mock_maintenances):
        """'list' dispatches to get_maintenances and reports handled."""
        args = Mock()
        args.maintenance_action = "list"
        mock_client.api.get_maintenances.return_value = mock_maintenances

        handled = handle_maintenance_command(args, mock_client)

        assert handled is True
        mock_client.api.get_maintenances.assert_called_once()

    def test_handle_maintenance_command_delete(self, mock_client):
        """'delete' with an ID removes exactly that maintenance."""
        args = Mock()
        args.maintenance_action = "delete"
        args.id = 1
        args.all = False
        mock_client.api.get_maintenance.return_value = {"id": 1, "title": "Test Maintenance"}
        mock_client.api.delete_maintenance.return_value = {"msg": "Deleted Successfully"}

        handled = handle_maintenance_command(args, mock_client)

        assert handled is True
        mock_client.api.delete_maintenance.assert_called_once_with(1)

    def test_handle_maintenance_command_unknown_action(self, mock_client, capsys):
        """Unrecognized sub-actions print a hint and are not handled."""
        args = Mock()
        args.maintenance_action = "unknown"

        handled = handle_maintenance_command(args, mock_client)

        assert handled is False
        assert "Unknown maintenance action. Use --help for usage information." in capsys.readouterr().out

264
tests/test_monitor.py Normal file
View File

@@ -0,0 +1,264 @@
#!/usr/bin/env python3
import pytest
from unittest.mock import Mock, patch
from io import StringIO
import sys
from kumacli.cmd.monitor import MonitorCommands, handle_monitor_command, setup_monitor_parser
class TestMonitorCommands:
    """Unit tests for MonitorCommands pause/resume operations."""

    def test_pause_monitors_by_pattern(self, mock_client, mock_monitors, capsys):
        """Every monitor matching a name pattern gets paused."""
        mock_client.api.get_monitors.return_value = mock_monitors
        mock_client.find_monitors_by_pattern.return_value = [
            {"id": 1, "name": "Test Monitor 1"},
            {"id": 2, "name": "Test Monitor 2"},
        ]
        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}

        MonitorCommands(mock_client).pause_monitors(monitor_patterns=["Test*"])

        mock_client.find_monitors_by_pattern.assert_called_once_with(["Test*"])
        assert mock_client.api.pause_monitor.call_count == 2
        mock_client.api.pause_monitor.assert_any_call(1)
        mock_client.api.pause_monitor.assert_any_call(2)
        out = capsys.readouterr().out
        assert "Found 2 matching monitors to pause:" in out
        assert "Paused monitor 'Test Monitor 1' (ID: 1)" in out
        assert "Paused monitor 'Test Monitor 2' (ID: 2)" in out
        assert "Successfully paused 2 out of 2 monitors" in out

    def test_pause_monitors_by_group(self, mock_client, mock_monitors, capsys):
        """Group patterns resolve to child monitors, which are paused."""
        mock_client.get_monitors_in_groups.return_value = [
            {"id": 4, "name": "Child Monitor"},
        ]
        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}

        MonitorCommands(mock_client).pause_monitors(group_patterns=["Group*"])

        mock_client.get_monitors_in_groups.assert_called_once_with(["Group*"])
        mock_client.api.pause_monitor.assert_called_once_with(4)
        out = capsys.readouterr().out
        assert "Found 1 matching monitors to pause:" in out
        assert "Paused monitor 'Child Monitor' (ID: 4)" in out

    def test_pause_monitors_no_patterns(self, mock_client, capsys):
        """Pause without --monitor or --group is rejected."""
        MonitorCommands(mock_client).pause_monitors()

        assert "Error: Either --monitor or --group flag is required." in capsys.readouterr().out

    def test_pause_monitors_no_matches(self, mock_client, capsys):
        """A pattern that matches nothing reports an error."""
        mock_client.find_monitors_by_pattern.return_value = []

        MonitorCommands(mock_client).pause_monitors(monitor_patterns=["NonExistent*"])

        assert "Error: No monitors found matching the specified patterns or groups" in capsys.readouterr().out

    def test_pause_monitors_api_error(self, mock_client, capsys):
        """Per-monitor API failures are reported and counted as misses."""
        mock_client.find_monitors_by_pattern.return_value = [
            {"id": 1, "name": "Test Monitor 1"},
        ]
        mock_client.api.pause_monitor.side_effect = Exception("API Error")

        MonitorCommands(mock_client).pause_monitors(monitor_patterns=["Test*"])

        out = capsys.readouterr().out
        assert "Failed to pause monitor 'Test Monitor 1': API Error" in out
        assert "Successfully paused 0 out of 1 monitors" in out

    def test_resume_monitors_by_pattern(self, mock_client, mock_monitors, capsys):
        """Monitors matching a name pattern get resumed."""
        mock_client.find_monitors_by_pattern.return_value = [
            {"id": 2, "name": "Test Monitor 2"},
        ]
        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}

        MonitorCommands(mock_client).resume_monitors(monitor_patterns=["Test*"])

        mock_client.api.resume_monitor.assert_called_once_with(2)
        out = capsys.readouterr().out
        assert "Found 1 matching monitors to resume:" in out
        assert "Resumed monitor 'Test Monitor 2' (ID: 2)" in out

    def test_resume_monitors_all_paused(self, mock_client, mock_monitors, capsys):
        """--all resumes exactly the monitors whose active flag is False."""
        mock_client.api.get_monitors.return_value = mock_monitors
        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}

        MonitorCommands(mock_client).resume_monitors(resume_all=True)

        # The fixture marks monitors 2 and 4 as paused (active=False).
        assert mock_client.api.resume_monitor.call_count == 2
        mock_client.api.resume_monitor.assert_any_call(2)
        mock_client.api.resume_monitor.assert_any_call(4)
        out = capsys.readouterr().out
        assert "Found 2 paused monitors to resume:" in out
        assert "Successfully resumed 2 out of 2 monitors" in out

    def test_resume_monitors_all_no_paused(self, mock_client, capsys):
        """--all with nothing paused prints the 'none to resume' message."""
        mock_client.api.get_monitors.return_value = [
            {"id": 1, "name": "Active Monitor", "active": True},
        ]

        MonitorCommands(mock_client).resume_monitors(resume_all=True)

        assert "No paused monitors found to resume" in capsys.readouterr().out

    def test_resume_monitors_no_args(self, mock_client, capsys):
        """Resume without any selector flag is rejected."""
        MonitorCommands(mock_client).resume_monitors()

        assert "Error: Either --monitor, --group, or --all flag is required." in capsys.readouterr().out
class TestMonitorCommandHandler:
    """Unit tests for handle_monitor_command() dispatch."""

    def test_handle_monitor_command_no_action(self, mock_client, capsys):
        """A missing sub-action is not handled."""
        args = Mock()
        args.monitor_action = None

        # Patch the parser setup so the handler's help path has a parser available.
        with patch('kumacli.cmd.monitor.setup_monitor_parser') as mock_setup:
            mock_setup._parser = Mock()
            handled = handle_monitor_command(args, mock_client)

        assert handled is False

    def test_handle_monitor_command_pause(self, mock_client):
        """'pause' resolves the pattern and pauses the matching monitor."""
        args = Mock()
        args.monitor_action = "pause"
        args.monitor = ["test*"]
        args.group = None
        mock_client.find_monitors_by_pattern.return_value = [
            {"id": 1, "name": "test monitor"},
        ]
        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}

        handled = handle_monitor_command(args, mock_client)

        assert handled is True
        mock_client.api.pause_monitor.assert_called_once_with(1)

    def test_handle_monitor_command_resume(self, mock_client):
        """'resume' resolves the pattern and resumes the matching monitor."""
        args = Mock()
        args.monitor_action = "resume"
        args.monitor = ["test*"]
        args.group = None
        args.all = False
        mock_client.find_monitors_by_pattern.return_value = [
            {"id": 1, "name": "test monitor"},
        ]
        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}

        handled = handle_monitor_command(args, mock_client)

        assert handled is True
        mock_client.api.resume_monitor.assert_called_once_with(1)

    def test_handle_monitor_command_resume_all(self, mock_client, mock_monitors):
        """'resume --all' resumes every paused monitor from the fixture."""
        args = Mock()
        args.monitor_action = "resume"
        args.monitor = None
        args.group = None
        args.all = True
        mock_client.api.get_monitors.return_value = mock_monitors
        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}

        handled = handle_monitor_command(args, mock_client)

        assert handled is True
        # The fixture contains two paused monitors (IDs 2 and 4).
        assert mock_client.api.resume_monitor.call_count == 2

    def test_handle_monitor_command_unknown_action(self, mock_client, capsys):
        """Unrecognized sub-actions print a hint and are not handled."""
        args = Mock()
        args.monitor_action = "unknown"

        handled = handle_monitor_command(args, mock_client)

        assert handled is False
        assert "Unknown monitor action. Use --help for usage information." in capsys.readouterr().out