diff --git a/Makefile b/Makefile
index 6aeda9b..cd01bbc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: clean build install test help
+.PHONY: clean build install test test-deps help

 # Default target
 help:
@@ -6,7 +6,8 @@ help:
 	@echo "  clean      - Remove build artifacts and cache files"
 	@echo "  build      - Build the wheel package"
 	@echo "  install    - Install the package in development mode"
-	@echo "  test       - Run tests (if available)"
+	@echo "  test       - Run the test suite"
+	@echo "  test-deps  - Install test dependencies"
 	@echo "  help       - Show this help message"

 # Clean build artifacts
@@ -33,10 +34,15 @@ install:
 	@echo "Installing package in development mode..."
 	pip install -e .

-# Test the package (placeholder for when tests are added)
+# Install test dependencies
+test-deps:
+	@echo "Installing test dependencies..."
+	pip install -e ".[test]"
+
+# Test the package
test:
 	@echo "Running tests..."
-	@echo "No tests configured yet."
+	python3 run_tests.py

 # Rebuild and reinstall (useful during development)
 dev: clean build
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..ed3f5dd
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,14 @@
+[pytest]
+testpaths = tests
+python_files = test_*.py
+python_classes = Test*
+python_functions = test_*
+pythonpath = src
+addopts =
+    -v
+    --tb=short
+    --strict-markers
+    --disable-warnings
+filterwarnings =
+    ignore::DeprecationWarning
+    ignore::PendingDeprecationWarning
\ No newline at end of file
diff --git a/run_tests.py b/run_tests.py
new file mode 100755
index 0000000..94abd56
--- /dev/null
+++ b/run_tests.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+
+"""
+Test runner script for kumacli
+
+Usage:
+    python run_tests.py                      # Run all tests
+    python run_tests.py --cov                # Run tests with coverage
+    python run_tests.py tests/test_info.py   # Run specific test file
+"""
+
+import sys
+import subprocess
+
+
+def run_tests(args=None):
+    """Run pytest with optional arguments"""
+    # Use python3 explicitly for compatibility
+    cmd = ["python3", "-m", "pytest"]
+
+    if args:
+        cmd.extend(args)
+    else:
+        cmd.extend([
+            "tests/",
+            "-v",
+            "--tb=short"
+        ])
+
+    try:
+        result = subprocess.run(cmd, check=True)
+        return result.returncode
+    except subprocess.CalledProcessError as e:
+        print(f"Tests failed with exit code: {e.returncode}")
+        return e.returncode
+    except FileNotFoundError:
+        print("pytest not found. Install with: pip install pytest")
+        return 1
+
+
+def main():
+    """Main entry point"""
+    if len(sys.argv) > 1:
+        # Pass through command line arguments
+        args = sys.argv[1:]
+    else:
+        args = None
+
+    exit_code = run_tests(args)
+    sys.exit(exit_code)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 89e957b..25ac2f1 100644
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,16 @@ setup(
     install_requires=[
         "uptime-kuma-api>=1.0.0",
     ],
+    extras_require={
+        "dev": [
+            "pytest>=7.0",
+            "pytest-cov>=2.0",
+        ],
+        "test": [
+            "pytest>=7.0",
+            "pytest-cov>=2.0",
+        ],
+    },
     entry_points={
         "console_scripts": [
             "kumacli=kumacli.kumacli:main",
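A note on the import wiring above: `pythonpath = src` in `pytest.ini` (an ini option available since pytest 7.0, hence the `pytest>=7.0` floor in the extras) prepends `src` to `sys.path` before collection, so the suite can import `kumacli` straight from the source tree without an editable install; `conftest.py` below repeats the same insertion so the tests also work when pytest is invoked without this ini file. A minimal smoke test of that import path (illustrative only, not part of this patch):

```python
# Relies on pythonpath = src (pytest >= 7) or the sys.path insert in conftest.py.
from kumacli.client import KumaClient


def test_kumacli_importable():
    # Constructing the client makes no network calls (connect() does that
    # later), so this runs offline; the URL is a placeholder.
    client = KumaClient("http://localhost:3001")
    assert client is not None
```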
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..bb58973
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,100 @@
+# KumaCLI Tests
+
+This directory contains the test suite for kumacli.
+
+## Running Tests
+
+### Prerequisites
+
+Install test dependencies:
+```bash
+pip install -e ".[test]"
+# or
+pip install pytest pytest-cov
+```
+
+### Run All Tests
+
+```bash
+# Using pytest directly
+python3 -m pytest
+
+# Using the test runner script
+python3 run_tests.py
+
+# From the project root
+python3 -m pytest tests/
+```
+
+### Run Specific Tests
+
+```bash
+# Test a specific file
+pytest tests/test_info.py
+
+# Test a specific class
+pytest tests/test_monitor.py::TestMonitorCommands
+
+# Test a specific method
+pytest tests/test_monitor.py::TestMonitorCommands::test_pause_monitors_by_pattern
+```
+
+### Run Tests with Coverage
+
+```bash
+pytest --cov=kumacli --cov-report=html
+python3 run_tests.py --cov
+```
+
+### Test Options
+
+```bash
+# Verbose output
+pytest -v
+
+# Stop on first failure
+pytest -x
+
+# Run tests in parallel (requires pytest-xdist)
+pytest -n auto
+```
+
+## Test Structure
+
+- `conftest.py` - Shared fixtures and test configuration
+- `test_info.py` - Tests for the info command
+- `test_monitor.py` - Tests for monitor commands (list, pause, resume)
+- `test_maintenance.py` - Tests for maintenance commands
+- `test_client.py` - Tests for the KumaClient class
+- `test_cli_integration.py` - Integration tests for CLI functionality
+
+## Test Coverage
+
+The tests cover:
+
+- ✅ Command argument parsing
+- ✅ API method calls and responses
+- ✅ Error handling and edge cases
+- ✅ Help message functionality
+- ✅ Monitor pause/resume operations
+- ✅ Maintenance operations
+- ✅ Client utility functions
+- ✅ Integration between components
+
+## Mock Strategy
+
+Tests use unittest.mock to:
+- Mock the UptimeKumaApi calls
+- Simulate API responses and errors
+- Test command logic without requiring a live server
+- Verify correct API method calls with expected parameters
+
+## Adding New Tests
+
+When adding new functionality:
+
+1. Add unit tests for the new commands/methods
+2. Add integration tests if the feature involves multiple components
+3. Test both success and error cases
+4. Mock external dependencies (API calls, file operations)
+5. Use descriptive test names that explain what is being tested
\ No newline at end of file
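The "Mock Strategy" section above is the key to reading the rest of this patch; condensed into code, the pattern every test below follows looks roughly like this (mirroring the `mock_client` fixture from `conftest.py` and the assertions in `test_monitor.py`):

```python
from unittest.mock import Mock

from kumacli.client import KumaClient
from kumacli.cmd.monitor import MonitorCommands


def test_pause_without_live_server():
    # Same shape as the conftest.py mock_client fixture: a spec'd client
    # with a plain Mock standing in for the UptimeKumaApi connection.
    client = Mock(spec=KumaClient)
    client.api = Mock()

    # Simulate API responses instead of talking to a live server.
    client.find_monitors_by_pattern.return_value = [{"id": 1, "name": "Web"}]
    client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}

    # Exercise the real command logic against the fake API.
    MonitorCommands(client).pause_monitors(monitor_patterns=["Web*"])

    # Verify the expected API method was called with the expected parameters.
    client.api.pause_monitor.assert_called_once_with(1)
```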
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..a7abb47
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1 @@
+# Test package for kumacli
\ No newline at end of file
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..d0d185a
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+
+import sys
+import os
+# Add the src directory to Python path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
+
+import pytest
+from unittest.mock import Mock, MagicMock
+from kumacli.client import KumaClient
+
+
+@pytest.fixture
+def mock_client():
+    """Create a mock KumaClient for testing"""
+    client = Mock(spec=KumaClient)
+    client.api = Mock()
+    return client
+
+
+@pytest.fixture
+def mock_monitors():
+    """Sample monitor data for testing"""
+    return [
+        {
+            "id": 1,
+            "name": "Test Monitor 1",
+            "type": "http",
+            "url": "https://example.com",
+            "active": True,
+            "parent": None
+        },
+        {
+            "id": 2,
+            "name": "Test Monitor 2",
+            "type": "http",
+            "url": "https://test.com",
+            "active": False,
+            "parent": None
+        },
+        {
+            "id": 3,
+            "name": "Group Monitor",
+            "type": "group",
+            "active": True,
+            "parent": None
+        },
+        {
+            "id": 4,
+            "name": "Child Monitor",
+            "type": "http",
+            "url": "https://child.com",
+            "active": False,
+            "parent": 3
+        }
+    ]
+
+
+@pytest.fixture
+def mock_maintenances():
+    """Sample maintenance data for testing"""
+    return [
+        {
+            "id": 1,
+            "title": "Test Maintenance",
+            "description": "Test maintenance description",
+            "strategy": "single",
+            "active": True
+        },
+        {
+            "id": 2,
+            "title": "Inactive Maintenance",
+            "description": "Inactive maintenance description",
+            "strategy": "single",
+            "active": False
+        }
+    ]
\ No newline at end of file
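One detail of these fixtures worth calling out: `Mock(spec=KumaClient)` makes the mock reject any attribute the real class does not define, so a typo'd method name in a test raises `AttributeError` instead of silently auto-creating a new mock (explicitly assigning `client.api` is still allowed, which is why the fixture can attach it). For instance:

```python
from unittest.mock import Mock

from kumacli.client import KumaClient

client = Mock(spec=KumaClient)
client.find_monitors_by_pattern(["Web*"])      # OK: a real KumaClient method

try:
    client.find_monitor_by_pattern(["Web*"])   # typo: singular "monitor"
except AttributeError as exc:
    print(f"caught: {exc}")                    # spec'd mocks fail loudly
```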
diff --git a/tests/test_cli_integration.py b/tests/test_cli_integration.py
new file mode 100644
index 0000000..d6f0332
--- /dev/null
+++ b/tests/test_cli_integration.py
@@ -0,0 +1,254 @@
+#!/usr/bin/env python3
+
+import pytest
+from unittest.mock import Mock, patch, MagicMock
+import argparse
+from io import StringIO
+import sys
+
+from kumacli.cmd.monitor import setup_monitor_parser, handle_monitor_command
+from kumacli.cmd.maintenance import setup_maintenance_parser, handle_maintenance_command
+from kumacli.cmd.info import setup_info_parser, handle_info_command
+
+
+class TestCLIIntegration:
+    def test_monitor_parser_setup(self):
+        """Test monitor parser setup"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+
+        monitor_parser = setup_monitor_parser(subparsers)
+
+        # Verify parser is created
+        assert monitor_parser is not None
+        assert hasattr(setup_monitor_parser, '_parser')
+
+    def test_maintenance_parser_setup(self):
+        """Test maintenance parser setup"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+
+        maintenance_parser = setup_maintenance_parser(subparsers)
+
+        # Verify parser is created
+        assert maintenance_parser is not None
+        assert hasattr(setup_maintenance_parser, '_parser')
+
+    def test_info_parser_setup(self):
+        """Test info parser setup"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+
+        info_parser = setup_info_parser(subparsers)
+
+        # Verify parser is created
+        assert info_parser is not None
+
+    def test_monitor_help_message(self, mock_client, capsys):
+        """Test monitor command shows help when no action specified"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = None
+
+        # Setup parser reference
+        mock_parser = Mock()
+        setup_monitor_parser._parser = mock_parser
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify
+        assert result is False
+        mock_parser.print_help.assert_called_once()
+
+    def test_maintenance_help_message(self, mock_client, capsys):
+        """Test maintenance command shows help when no action specified"""
+        # Setup
+        mock_args = Mock()
+        mock_args.maintenance_action = None
+
+        # Setup parser reference
+        mock_parser = Mock()
+        setup_maintenance_parser._parser = mock_parser
+
+        # Execute
+        result = handle_maintenance_command(mock_args, mock_client)
+
+        # Verify
+        assert result is False
+        mock_parser.print_help.assert_called_once()
+
+    def test_monitor_command_with_full_args(self, mock_client):
+        """Test monitor command with complete argument structure"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = "pause"
+        mock_args.monitor = ["test*"]
+        mock_args.group = ["web-services"]
+
+        # Mock client methods
+        mock_client.find_monitors_by_pattern.return_value = [
+            {"id": 1, "name": "test-monitor"}
+        ]
+        mock_client.get_monitors_in_groups.return_value = [
+            {"id": 2, "name": "web-service-monitor"}
+        ]
+        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        mock_client.find_monitors_by_pattern.assert_called_once_with(["test*"])
+        mock_client.get_monitors_in_groups.assert_called_once_with(["web-services"])
+        # Should pause both monitors (deduplicated)
+        assert mock_client.api.pause_monitor.call_count == 2
+
+    def test_resume_all_monitors_integration(self, mock_client, mock_monitors):
+        """Test resume all monitors integration"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = "resume"
+        mock_args.monitor = None
+        mock_args.group = None
+        mock_args.all = True
+
+        mock_client.api.get_monitors.return_value = mock_monitors
+        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        mock_client.api.get_monitors.assert_called_once()
+        # Should resume only paused monitors (ID 2 and 4 from mock_monitors)
+        assert mock_client.api.resume_monitor.call_count == 2
+        mock_client.api.resume_monitor.assert_any_call(2)
+        mock_client.api.resume_monitor.assert_any_call(4)
+
+
+class TestArgumentParsing:
+    def test_monitor_list_arguments(self):
+        """Test monitor list command argument parsing"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+        setup_monitor_parser(subparsers)
+
+        # Test with monitor patterns
+        args = parser.parse_args(["monitor", "list", "--monitor", "web*", "--monitor", "api*"])
+        assert args.resource == "monitor"
+        assert args.monitor_action == "list"
+        assert args.monitor == ["web*", "api*"]
+
+    def test_monitor_pause_arguments(self):
+        """Test monitor pause command argument parsing"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+        setup_monitor_parser(subparsers)
+
+        # Test with group patterns
+        args = parser.parse_args(["monitor", "pause", "--group", "production"])
+        assert args.resource == "monitor"
+        assert args.monitor_action == "pause"
+        assert args.group == ["production"]
+
+    def test_monitor_resume_all_arguments(self):
+        """Test monitor resume all command argument parsing"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+        setup_monitor_parser(subparsers)
+
+        # Test with --all flag
+        args = parser.parse_args(["monitor", "resume", "--all"])
+        assert args.resource == "monitor"
+        assert args.monitor_action == "resume"
+        assert args.all is True
+
+    def test_maintenance_add_arguments(self):
+        """Test maintenance add command argument parsing"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+        setup_maintenance_parser(subparsers)
+
+        # Test maintenance add
+        args = parser.parse_args([
+            "maintenance", "add",
+            "--title", "Server Update",
+            "--description", "Updating server software",
+            "--duration", "2h",
+            "--monitor", "server*"
+        ])
+        assert args.resource == "maintenance"
+        assert args.maintenance_action == "add"
+        assert args.title == "Server Update"
+        assert args.description == "Updating server software"
+        assert args.duration == "2h"
+        assert args.monitor == ["server*"]
+
+    def test_maintenance_delete_arguments(self):
+        """Test maintenance delete command argument parsing"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+        setup_maintenance_parser(subparsers)
+
+        # Test delete by ID
+        args = parser.parse_args(["maintenance", "delete", "--id", "123"])
+        assert args.resource == "maintenance"
+        assert args.maintenance_action == "delete"
+        assert args.id == 123
+
+        # Test delete all
+        args = parser.parse_args(["maintenance", "delete", "--all"])
+        assert args.resource == "maintenance"
+        assert args.maintenance_action == "delete"
+        assert args.all is True
+
+    def test_info_arguments(self):
+        """Test info command argument parsing"""
+        parser = argparse.ArgumentParser()
+        subparsers = parser.add_subparsers(dest="resource")
+        setup_info_parser(subparsers)
+
+        # Test info command
+        args = parser.parse_args(["info"])
+        assert args.resource == "info"
+
+
+class TestErrorHandling:
+    def test_monitor_command_resilience(self, mock_client, capsys):
+        """Test monitor command handles various error conditions"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = "pause"
+        mock_args.monitor = ["nonexistent*"]
+        mock_args.group = None
+
+        # Mock no matches found
+        mock_client.find_monitors_by_pattern.return_value = []
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify error handling
+        assert result is True  # Command completes even with no matches
+        captured = capsys.readouterr()
+        assert "Error: No monitors found matching the specified patterns or groups" in captured.out
+
+    def test_maintenance_command_resilience(self, mock_client, capsys):
+        """Test maintenance command handles API errors"""
+        # Setup
+        mock_args = Mock()
+        mock_args.maintenance_action = "list"
+
+        # Mock API error
+        mock_client.api.get_maintenances.side_effect = Exception("Connection timeout")
+
+        # Execute
+        result = handle_maintenance_command(mock_args, mock_client)
+
+        # Verify error handling
+        assert result is True  # Command completes even with error
+        captured = capsys.readouterr()
+        assert "Error listing maintenances: Connection timeout" in captured.out
\ No newline at end of file
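The `_parser` attribute asserted on above encodes a small pattern: each `setup_*_parser` function evidently stashes the subparser it builds on the function object itself, which is how the handlers print contextual help when no action is given. The shape implied by these tests, sketched (the real implementations live in `kumacli/cmd/` and may differ in detail):

```python
import argparse


def setup_monitor_parser(subparsers):
    """Register the `monitor` resource and remember its parser for help output."""
    parser = subparsers.add_parser("monitor", help="Manage monitors")
    actions = parser.add_subparsers(dest="monitor_action")

    pause = actions.add_parser("pause", help="Pause matching monitors")
    # action="append" is what lets repeated --monitor flags collect into a
    # list, matching the expectation args.monitor == ["web*", "api*"].
    pause.add_argument("--monitor", action="append")
    pause.add_argument("--group", action="append")

    # Stash the parser on the function itself so handle_monitor_command can
    # print help without the parser being threaded through main().
    setup_monitor_parser._parser = parser
    return parser


def handle_monitor_command(args, client):
    """Return False when nothing could be dispatched, True otherwise."""
    if args.monitor_action is None:
        setup_monitor_parser._parser.print_help()
        return False
    # ... dispatch "pause"/"resume"/"list" to MonitorCommands ...
    return True
```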
diff --git a/tests/test_client.py b/tests/test_client.py
new file mode 100644
index 0000000..ca6706d
--- /dev/null
+++ b/tests/test_client.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env python3
+
+import pytest
+from unittest.mock import Mock, patch
+from datetime import datetime, timedelta
+
+from kumacli.client import KumaClient
+
+
+class TestKumaClient:
+    def test_parse_duration_minutes(self):
+        """Test parsing duration in minutes"""
+        client = KumaClient("http://test.com")
+
+        assert client.parse_duration("90m") == 5400  # 90 * 60
+        assert client.parse_duration("1m") == 60
+        assert client.parse_duration("120m") == 7200
+
+    def test_parse_duration_hours(self):
+        """Test parsing duration in hours"""
+        client = KumaClient("http://test.com")
+
+        assert client.parse_duration("1h") == 3600  # 1 * 3600
+        assert client.parse_duration("2h") == 7200
+        assert client.parse_duration("24h") == 86400
+
+    def test_parse_duration_seconds(self):
+        """Test parsing duration in seconds"""
+        client = KumaClient("http://test.com")
+
+        assert client.parse_duration("3600s") == 3600
+        assert client.parse_duration("60s") == 60
+        assert client.parse_duration("1s") == 1
+
+    def test_parse_duration_default(self):
+        """Test parsing duration with default value"""
+        client = KumaClient("http://test.com")
+
+        assert client.parse_duration(None) == 5400  # Default 90 minutes
+        assert client.parse_duration("") == 5400
+
+    def test_parse_duration_invalid(self):
+        """Test parsing invalid duration format"""
+        client = KumaClient("http://test.com")
+
+        with pytest.raises(ValueError, match="Invalid duration format"):
+            client.parse_duration("invalid")
+
+        with pytest.raises(ValueError, match="Invalid duration format"):
+            client.parse_duration("90x")
+
+        with pytest.raises(ValueError, match="Invalid duration format"):
+            client.parse_duration("90")
+
+    def test_parse_start_time_none(self):
+        """Test parsing start time with None (current time)"""
+        client = KumaClient("http://test.com")
+
+        before = datetime.utcnow()
+        result = client.parse_start_time(None)
+        after = datetime.utcnow()
+
+        assert before <= result <= after
+
+    def test_parse_start_time_iso_format(self):
+        """Test parsing ISO format start time"""
+        client = KumaClient("http://test.com")
+
+        # Test ISO format with Z
+        result = client.parse_start_time("2023-12-25T10:30:00Z")
+        expected = datetime(2023, 12, 25, 10, 30, 0)
+        assert result == expected
+
+        # Test ISO format with timezone
+        result = client.parse_start_time("2023-12-25T10:30:00+00:00")
+        assert result == expected
+
+    def test_parse_start_time_common_formats(self):
+        """Test parsing common date/time formats"""
+        client = KumaClient("http://test.com")
+
+        # Full datetime
+        result = client.parse_start_time("2023-12-25 10:30:00")
+        expected = datetime(2023, 12, 25, 10, 30, 0)
+        assert result == expected
+
+        # Date and hour:minute
+        result = client.parse_start_time("2023-12-25 10:30")
+        expected = datetime(2023, 12, 25, 10, 30, 0)
+        assert result == expected
+
+        # Date only
+        result = client.parse_start_time("2023-12-25")
+        expected = datetime(2023, 12, 25, 0, 0, 0)
+        assert result == expected
+
+    def test_parse_start_time_invalid(self):
+        """Test parsing invalid start time format"""
+        client = KumaClient("http://test.com")
+
+        with pytest.raises(ValueError, match="Invalid start time format"):
+            client.parse_start_time("invalid-date")
+
+        with pytest.raises(ValueError, match="Invalid start time format"):
+            client.parse_start_time("2023-13-45")
+
+    def test_find_monitors_by_pattern_success(self):
+        """Test finding monitors by pattern successfully"""
+        client = KumaClient("http://test.com")
+        client.api = Mock()
+
+        mock_monitors = [
+            {"id": 1, "name": "Web Server"},
+            {"id": 2, "name": "API Server"},
+            {"id": 3, "name": "Database"},
+            {"id": 4, "name": "Web Frontend"}
+        ]
+        client.api.get_monitors.return_value = mock_monitors
+
+        # Test exact match
+        result = client.find_monitors_by_pattern(["Web Server"])
+        assert len(result) == 1
+        assert result[0]["name"] == "Web Server"
+
+        # Test wildcard pattern
+        result = client.find_monitors_by_pattern(["Web*"])
+        assert len(result) == 2
+        names = [m["name"] for m in result]
+        assert "Web Server" in names
+        assert "Web Frontend" in names
+
+    def test_find_monitors_by_pattern_case_insensitive(self):
+        """Test finding monitors by pattern is case insensitive"""
+        client = KumaClient("http://test.com")
+        client.api = Mock()
+
+        mock_monitors = [
+            {"id": 1, "name": "Web Server"},
+            {"id": 2, "name": "API Server"}
+        ]
+        client.api.get_monitors.return_value = mock_monitors
+
+        # Test case insensitive matching
+        result = client.find_monitors_by_pattern(["web*"])
+        assert len(result) == 1
+        assert result[0]["name"] == "Web Server"
+
+    def test_find_monitors_by_pattern_no_matches(self):
+        """Test finding monitors with no matches"""
+        client = KumaClient("http://test.com")
+        client.api = Mock()
+
+        mock_monitors = [
+            {"id": 1, "name": "Web Server"}
+        ]
+        client.api.get_monitors.return_value = mock_monitors
+
+        result = client.find_monitors_by_pattern(["Database*"])
+        assert len(result) == 0
+
+    def test_find_monitors_by_pattern_duplicates(self):
+        """Test finding monitors removes duplicates"""
+        client = KumaClient("http://test.com")
+        client.api = Mock()
+
+        mock_monitors = [
+            {"id": 1, "name": "Web Server"}
+        ]
+        client.api.get_monitors.return_value = mock_monitors
+
+        # Same monitor should match both patterns
+        result = client.find_monitors_by_pattern(["Web*", "*Server"])
+        assert len(result) == 1
+        assert result[0]["name"] == "Web Server"
+
+    def test_find_monitors_by_pattern_api_error(self, capsys):
+        """Test finding monitors handles API errors"""
+        client = KumaClient("http://test.com")
+        client.api = Mock()
+
+        client.api.get_monitors.side_effect = Exception("API Error")
+
+        result = client.find_monitors_by_pattern(["Web*"])
+        assert len(result) == 0
+
+        captured = capsys.readouterr()
+        assert "Error finding monitors: API Error" in captured.out
+
+    @patch('kumacli.client.UptimeKumaApi')
+    def test_connect_success(self, mock_api_class, capsys):
+        """Test successful connection"""
+        mock_api = Mock()
+        mock_api_class.return_value = mock_api
+        mock_api.login.return_value = True
+
+        client = KumaClient("http://test.com", "user", "pass")
+        result = client.connect()
+
+        assert result is True
+        assert client.api is mock_api
+        mock_api_class.assert_called_once_with("http://test.com")
+        mock_api.login.assert_called_once_with("user", "pass")
+
+        captured = capsys.readouterr()
+        assert "Connected to http://test.com" in captured.out
+
+    @patch('kumacli.client.UptimeKumaApi')
+    def test_connect_failure(self, mock_api_class, capsys):
+        """Test connection failure"""
+        mock_api_class.side_effect = Exception("Connection failed")
+
+        client = KumaClient("http://test.com", "user", "pass")
+        result = client.connect()
+
+        assert result is False
+        captured = capsys.readouterr()
+        assert "Failed to connect: Connection failed" in captured.out
+
+    def test_disconnect(self):
+        """Test disconnection"""
+        client = KumaClient("http://test.com")
+        client.api = Mock()
+
+        client.disconnect()
+
+        client.api.disconnect.assert_called_once()
\ No newline at end of file
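Read together, the parsing and matching tests above pin the client helpers down almost completely; this is the behavior they demand, sketched as free functions (the shipped `kumacli/client.py` implements these as `KumaClient` methods and is authoritative):

```python
import fnmatch
import re


def parse_duration(duration):
    """Return seconds for '90m' / '2h' / '3600s'; default is 90 minutes."""
    if not duration:
        return 5400  # None or "" -> default, per test_parse_duration_default
    match = re.fullmatch(r"(\d+)([smh])", duration)
    if not match:
        # Bare numbers like "90" are rejected too: the unit is mandatory.
        raise ValueError(f"Invalid duration format: {duration}")
    value, unit = int(match.group(1)), match.group(2)
    return value * {"s": 1, "m": 60, "h": 3600}[unit]


def find_monitors_by_pattern(api, patterns):
    """Case-insensitive glob match on monitor names, deduplicated by id."""
    try:
        monitors = api.get_monitors()
    except Exception as e:
        print(f"Error finding monitors: {e}")
        return []
    matched = {}
    for monitor in monitors:
        for pattern in patterns:
            if fnmatch.fnmatch(monitor["name"].lower(), pattern.lower()):
                matched[monitor["id"]] = monitor  # one entry per id
                break
    return list(matched.values())
```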
diff --git a/tests/test_info.py b/tests/test_info.py
new file mode 100644
index 0000000..56bbcef
--- /dev/null
+++ b/tests/test_info.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+
+import pytest
+from unittest.mock import Mock, patch
+from io import StringIO
+import sys
+
+from kumacli.cmd.info import InfoCommands, handle_info_command
+
+
+class TestInfoCommands:
+    def test_get_info_success(self, mock_client, capsys):
+        """Test successful info retrieval"""
+        # Setup
+        mock_info_data = {
+            "version": "1.23.0",
+            "hostname": "kuma-server",
+            "primaryBaseURL": "https://status.example.com"
+        }
+        mock_client.api.info.return_value = mock_info_data
+
+        info_commands = InfoCommands(mock_client)
+
+        # Execute
+        info_commands.get_info()
+
+        # Verify
+        mock_client.api.info.assert_called_once()
+        captured = capsys.readouterr()
+        assert "Server Information:" in captured.out
+        assert "version: 1.23.0" in captured.out
+        assert "hostname: kuma-server" in captured.out
+        assert "primaryBaseURL: https://status.example.com" in captured.out
+
+    def test_get_info_empty_response(self, mock_client, capsys):
+        """Test info command with empty response"""
+        # Setup
+        mock_client.api.info.return_value = None
+
+        info_commands = InfoCommands(mock_client)
+
+        # Execute
+        info_commands.get_info()
+
+        # Verify
+        mock_client.api.info.assert_called_once()
+        captured = capsys.readouterr()
+        assert "No server info available" in captured.out
+
+    def test_get_info_api_error(self, mock_client, capsys):
+        """Test info command with API error"""
+        # Setup
+        mock_client.api.info.side_effect = Exception("Connection failed")
+
+        info_commands = InfoCommands(mock_client)
+
+        # Execute
+        info_commands.get_info()
+
+        # Verify
+        mock_client.api.info.assert_called_once()
+        captured = capsys.readouterr()
+        assert "Error getting server info: Connection failed" in captured.out
+
+
+class TestInfoCommandHandler:
+    def test_handle_info_command(self, mock_client):
+        """Test info command handler"""
+        # Setup
+        mock_args = Mock()
+        mock_info_data = {"version": "1.23.0"}
+        mock_client.api.info.return_value = mock_info_data
+
+        # Execute
+        result = handle_info_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        mock_client.api.info.assert_called_once()
+
+    def test_handle_info_command_with_error(self, mock_client):
+        """Test info command handler with error"""
+        # Setup
+        mock_args = Mock()
+        mock_client.api.info.side_effect = Exception("API Error")
+
+        # Execute
+        result = handle_info_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True  # Handler always returns True
+        mock_client.api.info.assert_called_once()
\ No newline at end of file
diff --git a/tests/test_maintenance.py b/tests/test_maintenance.py
new file mode 100644
index 0000000..572ebb0
--- /dev/null
+++ b/tests/test_maintenance.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+
+import pytest
+from unittest.mock import Mock, patch
+from io import StringIO
+import sys
+
+from kumacli.cmd.maintenance import MaintenanceCommands, handle_maintenance_command
+
+
+class TestMaintenanceCommands:
+    def test_list_maintenances_success(self, mock_client, mock_maintenances, capsys):
+        """Test successful maintenance listing"""
+        # Setup
+        mock_client.api.get_maintenances.return_value = mock_maintenances
+
+        maintenance_commands = MaintenanceCommands(mock_client)
+
+        # Execute
+        maintenance_commands.list_maintenances()
+
+        # Verify
+        mock_client.api.get_maintenances.assert_called_once()
+        captured = capsys.readouterr()
+        assert "Test Maintenance" in captured.out
+        assert "Inactive Maintenance" in captured.out
+        assert "Active" in captured.out
+        assert "Inactive" in captured.out
+
+    def test_list_maintenances_empty(self, mock_client, capsys):
+        """Test maintenance listing with no maintenances"""
+        # Setup
+        mock_client.api.get_maintenances.return_value = []
+
+        maintenance_commands = MaintenanceCommands(mock_client)
+
+        # Execute
+        maintenance_commands.list_maintenances()
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "No maintenances found" in captured.out
+
+    def test_list_maintenances_api_error(self, mock_client, capsys):
+        """Test maintenance listing with API error"""
+        # Setup
+        mock_client.api.get_maintenances.side_effect = Exception("API Error")
+
+        maintenance_commands = MaintenanceCommands(mock_client)
+
+        # Execute
+        maintenance_commands.list_maintenances()
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "Error listing maintenances: API Error" in captured.out
+
+    def test_delete_maintenance_by_id(self, mock_client, capsys):
+        """Test deleting maintenance by ID"""
+        # Setup
+        mock_maintenance = {"id": 1, "title": "Test Maintenance"}
+        mock_client.api.get_maintenance.return_value = mock_maintenance
+        mock_client.api.delete_maintenance.return_value = {"msg": "Deleted Successfully"}
+
+        maintenance_commands = MaintenanceCommands(mock_client)
+
+        # Execute
+        maintenance_commands.delete_maintenance(maintenance_id=1)
+
+        # Verify
+        mock_client.api.get_maintenance.assert_called_once_with(1)
+        mock_client.api.delete_maintenance.assert_called_once_with(1)
+        captured = capsys.readouterr()
+        assert "Successfully deleted maintenance 'Test Maintenance' (ID: 1)" in captured.out
+
+    def test_delete_all_maintenances(self, mock_client, mock_maintenances, capsys):
+        """Test deleting all maintenances"""
+        # Setup
+        mock_client.api.get_maintenances.return_value = mock_maintenances
+        mock_client.api.delete_maintenance.return_value = {"msg": "Deleted Successfully"}
+
+        maintenance_commands = MaintenanceCommands(mock_client)
+
+        # Execute
+        maintenance_commands.delete_maintenance(delete_all=True)
+
+        # Verify
+        assert mock_client.api.delete_maintenance.call_count == 2
+        captured = capsys.readouterr()
+        assert "Found 2 maintenances to delete:" in captured.out
+        assert "Successfully deleted 2 out of 2 maintenances" in captured.out
+
+    def test_delete_maintenance_no_params(self, mock_client, capsys):
+        """Test deleting maintenance without parameters"""
+        maintenance_commands = MaintenanceCommands(mock_client)
+
+        # Execute
+        maintenance_commands.delete_maintenance()
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "Error: Either --id or --all flag is required for delete operation" in captured.out
+
+
+class TestMaintenanceCommandHandler:
+    def test_handle_maintenance_command_no_action(self, mock_client, capsys):
+        """Test maintenance command handler with no action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.maintenance_action = None
+
+        # Mock the parser setup
+        with patch('kumacli.cmd.maintenance.setup_maintenance_parser') as mock_setup:
+            mock_parser = Mock()
+            mock_setup._parser = mock_parser
+
+            # Execute
+            result = handle_maintenance_command(mock_args, mock_client)
+
+            # Verify
+            assert result is False
+
+    def test_handle_maintenance_command_list(self, mock_client, mock_maintenances):
+        """Test maintenance command handler for list action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.maintenance_action = "list"
+        mock_client.api.get_maintenances.return_value = mock_maintenances
+
+        # Execute
+        result = handle_maintenance_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        mock_client.api.get_maintenances.assert_called_once()
+
+    def test_handle_maintenance_command_delete(self, mock_client):
+        """Test maintenance command handler for delete action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.maintenance_action = "delete"
+        mock_args.id = 1
+        mock_args.all = False
+
+        mock_maintenance = {"id": 1, "title": "Test Maintenance"}
+        mock_client.api.get_maintenance.return_value = mock_maintenance
+        mock_client.api.delete_maintenance.return_value = {"msg": "Deleted Successfully"}
+
+        # Execute
+        result = handle_maintenance_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        mock_client.api.delete_maintenance.assert_called_once_with(1)
+
+    def test_handle_maintenance_command_unknown_action(self, mock_client, capsys):
+        """Test maintenance command handler with unknown action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.maintenance_action = "unknown"
+
+        # Execute
+        result = handle_maintenance_command(mock_args, mock_client)
+
+        # Verify
+        assert result is False
+        captured = capsys.readouterr()
+        assert "Unknown maintenance action. Use --help for usage information." in captured.out
\ No newline at end of file
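Both handler test classes above and the monitor ones below encode the same contract: `handle_*_command` returns True when a recognized action ran (even if the action reported an API error on stdout) and False when there was nothing valid to dispatch. `kumacli.kumacli:main` is not part of this diff, so the following caller built on that contract is only illustrative:

```python
def dispatch(args, client, handlers):
    """Run the handler for args.resource; report failure to the shell."""
    handler = handlers.get(args.resource)
    if handler is None:
        print("Unknown resource. Use --help for usage information.")
        return 1
    # False means "nothing ran" (missing/unknown action); API errors are
    # printed inside the handlers and still count as a completed command.
    return 0 if handler(args, client) else 1


# e.g. dispatch(args, client, {"monitor": handle_monitor_command,
#                              "maintenance": handle_maintenance_command,
#                              "info": handle_info_command})
```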
diff --git a/tests/test_monitor.py b/tests/test_monitor.py
new file mode 100644
index 0000000..157d238
--- /dev/null
+++ b/tests/test_monitor.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python3
+
+import pytest
+from unittest.mock import Mock, patch
+from io import StringIO
+import sys
+
+from kumacli.cmd.monitor import MonitorCommands, handle_monitor_command, setup_monitor_parser
+
+
+class TestMonitorCommands:
+    def test_pause_monitors_by_pattern(self, mock_client, mock_monitors, capsys):
+        """Test pausing monitors by pattern"""
+        # Setup
+        mock_client.api.get_monitors.return_value = mock_monitors
+        mock_client.find_monitors_by_pattern.return_value = [
+            {"id": 1, "name": "Test Monitor 1"},
+            {"id": 2, "name": "Test Monitor 2"}
+        ]
+        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}
+
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.pause_monitors(monitor_patterns=["Test*"])
+
+        # Verify
+        mock_client.find_monitors_by_pattern.assert_called_once_with(["Test*"])
+        assert mock_client.api.pause_monitor.call_count == 2
+        mock_client.api.pause_monitor.assert_any_call(1)
+        mock_client.api.pause_monitor.assert_any_call(2)
+
+        captured = capsys.readouterr()
+        assert "Found 2 matching monitors to pause:" in captured.out
+        assert "Paused monitor 'Test Monitor 1' (ID: 1)" in captured.out
+        assert "Paused monitor 'Test Monitor 2' (ID: 2)" in captured.out
+        assert "Successfully paused 2 out of 2 monitors" in captured.out
+
+    def test_pause_monitors_by_group(self, mock_client, mock_monitors, capsys):
+        """Test pausing monitors by group"""
+        # Setup
+        mock_client.get_monitors_in_groups.return_value = [
+            {"id": 4, "name": "Child Monitor"}
+        ]
+        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}
+
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.pause_monitors(group_patterns=["Group*"])
+
+        # Verify
+        mock_client.get_monitors_in_groups.assert_called_once_with(["Group*"])
+        mock_client.api.pause_monitor.assert_called_once_with(4)
+
+        captured = capsys.readouterr()
+        assert "Found 1 matching monitors to pause:" in captured.out
+        assert "Paused monitor 'Child Monitor' (ID: 4)" in captured.out
+
+    def test_pause_monitors_no_patterns(self, mock_client, capsys):
+        """Test pausing monitors without patterns"""
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.pause_monitors()
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "Error: Either --monitor or --group flag is required." in captured.out
+
+    def test_pause_monitors_no_matches(self, mock_client, capsys):
+        """Test pausing monitors with no matches"""
+        # Setup
+        mock_client.find_monitors_by_pattern.return_value = []
+
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.pause_monitors(monitor_patterns=["NonExistent*"])
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "Error: No monitors found matching the specified patterns or groups" in captured.out
+
+    def test_pause_monitors_api_error(self, mock_client, capsys):
+        """Test pausing monitors with API error"""
+        # Setup
+        mock_client.find_monitors_by_pattern.return_value = [
+            {"id": 1, "name": "Test Monitor 1"}
+        ]
+        mock_client.api.pause_monitor.side_effect = Exception("API Error")
+
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.pause_monitors(monitor_patterns=["Test*"])
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "Failed to pause monitor 'Test Monitor 1': API Error" in captured.out
+        assert "Successfully paused 0 out of 1 monitors" in captured.out
+
+    def test_resume_monitors_by_pattern(self, mock_client, mock_monitors, capsys):
+        """Test resuming monitors by pattern"""
+        # Setup
+        mock_client.find_monitors_by_pattern.return_value = [
+            {"id": 2, "name": "Test Monitor 2"}
+        ]
+        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}
+
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.resume_monitors(monitor_patterns=["Test*"])
+
+        # Verify
+        mock_client.api.resume_monitor.assert_called_once_with(2)
+
+        captured = capsys.readouterr()
+        assert "Found 1 matching monitors to resume:" in captured.out
+        assert "Resumed monitor 'Test Monitor 2' (ID: 2)" in captured.out
+
+    def test_resume_monitors_all_paused(self, mock_client, mock_monitors, capsys):
+        """Test resuming all paused monitors"""
+        # Setup
+        mock_client.api.get_monitors.return_value = mock_monitors
+        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}
+
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.resume_monitors(resume_all=True)
+
+        # Verify
+        # Should resume monitors with active=False (monitors 2 and 4)
+        assert mock_client.api.resume_monitor.call_count == 2
+        mock_client.api.resume_monitor.assert_any_call(2)
+        mock_client.api.resume_monitor.assert_any_call(4)
+
+        captured = capsys.readouterr()
+        assert "Found 2 paused monitors to resume:" in captured.out
+        assert "Successfully resumed 2 out of 2 monitors" in captured.out
+
+    def test_resume_monitors_all_no_paused(self, mock_client, capsys):
+        """Test resuming all paused monitors when none are paused"""
+        # Setup
+        active_monitors = [
+            {"id": 1, "name": "Active Monitor", "active": True}
+        ]
+        mock_client.api.get_monitors.return_value = active_monitors
+
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.resume_monitors(resume_all=True)
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "No paused monitors found to resume" in captured.out
+
+    def test_resume_monitors_no_args(self, mock_client, capsys):
+        """Test resuming monitors without any arguments"""
+        monitor_commands = MonitorCommands(mock_client)
+
+        # Execute
+        monitor_commands.resume_monitors()
+
+        # Verify
+        captured = capsys.readouterr()
+        assert "Error: Either --monitor, --group, or --all flag is required." in captured.out
+
+
+class TestMonitorCommandHandler:
+    def test_handle_monitor_command_no_action(self, mock_client, capsys):
+        """Test monitor command handler with no action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = None
+
+        # Mock the parser setup to avoid importing issues
+        with patch('kumacli.cmd.monitor.setup_monitor_parser') as mock_setup:
+            mock_parser = Mock()
+            mock_setup._parser = mock_parser
+
+            # Execute
+            result = handle_monitor_command(mock_args, mock_client)
+
+            # Verify
+            assert result is False
+
+    def test_handle_monitor_command_pause(self, mock_client):
+        """Test monitor command handler for pause action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = "pause"
+        mock_args.monitor = ["test*"]
+        mock_args.group = None
+
+        mock_client.find_monitors_by_pattern.return_value = [
+            {"id": 1, "name": "test monitor"}
+        ]
+        mock_client.api.pause_monitor.return_value = {"msg": "Paused Successfully."}
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        mock_client.api.pause_monitor.assert_called_once_with(1)
+
+    def test_handle_monitor_command_resume(self, mock_client):
+        """Test monitor command handler for resume action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = "resume"
+        mock_args.monitor = ["test*"]
+        mock_args.group = None
+        mock_args.all = False
+
+        mock_client.find_monitors_by_pattern.return_value = [
+            {"id": 1, "name": "test monitor"}
+        ]
+        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        mock_client.api.resume_monitor.assert_called_once_with(1)
+
+    def test_handle_monitor_command_resume_all(self, mock_client, mock_monitors):
+        """Test monitor command handler for resume all action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = "resume"
+        mock_args.monitor = None
+        mock_args.group = None
+        mock_args.all = True
+
+        mock_client.api.get_monitors.return_value = mock_monitors
+        mock_client.api.resume_monitor.return_value = {"msg": "Resumed Successfully."}
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify
+        assert result is True
+        # Should resume paused monitors (monitors 2 and 4)
+        assert mock_client.api.resume_monitor.call_count == 2
+
+    def test_handle_monitor_command_unknown_action(self, mock_client, capsys):
+        """Test monitor command handler with unknown action"""
+        # Setup
+        mock_args = Mock()
+        mock_args.monitor_action = "unknown"
+
+        # Execute
+        result = handle_monitor_command(mock_args, mock_client)
+
+        # Verify
+        assert result is False
+        captured = capsys.readouterr()
+        assert "Unknown monitor action. Use --help for usage information." in captured.out
\ No newline at end of file