# youtube-summarizer/backend/test_runner/cli.py
"""
Test Runner CLI Interface
Command-line interface for the YouTube Summarizer test runner with comprehensive
options for different execution modes, reporting, and configuration.
"""
import argparse
import asyncio
import sys
from pathlib import Path
from typing import List, Optional
import logging
# Add the project root to Python path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from test_runner.core.test_runner import TestRunner
from test_runner.core.models import TestRunnerMode, ReportFormat, TestCategory
from test_runner.config.test_config import TestConfig


class TestRunnerCLI:
    """Command-line interface for the test runner."""

    def __init__(self):
        """Initialize CLI."""
        self.parser = self._create_parser()

    def _create_parser(self) -> argparse.ArgumentParser:
        """Create argument parser."""
        parser = argparse.ArgumentParser(
            prog="test-runner",
            description="YouTube Summarizer Test Runner - Comprehensive test execution and reporting",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  # Run all tests with coverage
  python -m test_runner.cli run-all --coverage --report html

  # Run only unit tests in parallel
  python -m test_runner.cli run-unit --parallel --verbose

  # Run specific test patterns
  python -m test_runner.cli run-specific test_auth.py test_pipeline.py

  # Run tests changed since last commit
  python -m test_runner.cli run-changed --since HEAD~1

  # Generate reports from previous run
  python -m test_runner.cli report --format html json

  # List available tests
  python -m test_runner.cli list --category unit

  # Show test trends
  python -m test_runner.cli trends --days 7
"""
        )

        # Global options
        parser.add_argument(
            "--config",
            type=Path,
            help="Path to custom configuration file"
        )
        parser.add_argument(
            "--project-root",
            type=Path,
            help="Project root directory (auto-detected if not specified)"
        )
        parser.add_argument(
            "--log-level",
            choices=["DEBUG", "INFO", "WARNING", "ERROR"],
            default="INFO",
            help="Logging level"
        )

        # Subcommands
        subparsers = parser.add_subparsers(dest="command", help="Available commands")

        # Run all tests
        run_all_parser = subparsers.add_parser(
            "run-all",
            help="Run all test categories"
        )
        self._add_execution_args(run_all_parser)

        # Run unit tests
        run_unit_parser = subparsers.add_parser(
            "run-unit",
            help="Run unit tests only"
        )
        self._add_execution_args(run_unit_parser)

        # Run integration tests
        run_integration_parser = subparsers.add_parser(
            "run-integration",
            help="Run integration and API tests"
        )
        self._add_execution_args(run_integration_parser)

        # Run frontend tests
        run_frontend_parser = subparsers.add_parser(
            "run-frontend",
            help="Run frontend tests"
        )
        self._add_execution_args(run_frontend_parser)

        # Run E2E tests
        run_e2e_parser = subparsers.add_parser(
            "run-e2e",
            help="Run end-to-end tests"
        )
        self._add_execution_args(run_e2e_parser)

        # Run performance tests
        run_perf_parser = subparsers.add_parser(
            "run-performance",
            help="Run performance tests"
        )
        self._add_execution_args(run_perf_parser)

        # Run specific tests
        run_specific_parser = subparsers.add_parser(
            "run-specific",
            help="Run specific test files or patterns"
        )
        self._add_execution_args(run_specific_parser)
        run_specific_parser.add_argument(
            "patterns",
            nargs="+",
            help="Test file patterns or names to run"
        )

        # Run changed tests
        run_changed_parser = subparsers.add_parser(
            "run-changed",
            help="Run tests affected by recent changes"
        )
        self._add_execution_args(run_changed_parser)
        run_changed_parser.add_argument(
            "--since",
            default="HEAD~1",
            help="Git revision to compare against (default: HEAD~1)"
        )

        # List tests
        list_parser = subparsers.add_parser(
            "list",
            help="List available tests"
        )
        list_parser.add_argument(
            "--category",
            choices=[cat.value for cat in TestCategory],
            help="List tests in specific category"
        )
        list_parser.add_argument(
            "--detailed",
            action="store_true",
            help="Show detailed test information"
        )

        # Generate reports
        report_parser = subparsers.add_parser(
            "report",
            help="Generate reports from previous test run"
        )
        report_parser.add_argument(
            "--format",
            choices=[fmt.value for fmt in ReportFormat],
            nargs="+",
            default=["console"],
            help="Report formats to generate"
        )
        report_parser.add_argument(
            "--input",
            type=Path,
            help="Input JSON results file"
        )
        report_parser.add_argument(
            "--output",
            type=Path,
            help="Output directory for reports"
        )

        # Show trends
        trends_parser = subparsers.add_parser(
            "trends",
            help="Show test execution trends"
        )
        trends_parser.add_argument(
            "--days",
            type=int,
            default=30,
            help="Number of days to analyze (default: 30)"
        )
        trends_parser.add_argument(
            "--format",
            choices=["console", "json"],
            default="console",
            help="Output format"
        )

        # Configuration management
        config_parser = subparsers.add_parser(
            "config",
            help="Manage test configuration"
        )
        config_subparsers = config_parser.add_subparsers(dest="config_action")

        # Show config
        config_subparsers.add_parser(
            "show",
            help="Show current configuration"
        )

        # Validate config
        config_subparsers.add_parser(
            "validate",
            help="Validate configuration"
        )

        # Generate config
        generate_config_parser = config_subparsers.add_parser(
            "generate",
            help="Generate default configuration file"
        )
        generate_config_parser.add_argument(
            "--output",
            type=Path,
            default=Path("test_config.json"),
            help="Output file path"
        )

        return parser
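
    # Example: the subparsers above collapse into a single argparse.Namespace,
    # so a command line such as
    #     run-unit --coverage --report html json --workers 4
    # parses roughly to
    #     Namespace(command="run-unit", coverage=True, report=["html", "json"],
    #               workers=4, parallel=False, fail_fast=False, ...)
    # where the remaining attributes come from _add_execution_args below and the
    # global options (--config, --project-root, --log-level).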

    def _add_execution_args(self, parser: argparse.ArgumentParser) -> None:
        """Add common execution arguments to a parser."""
        # Execution options
        parser.add_argument(
            "--parallel",
            action="store_true",
            help="Enable parallel execution"
        )
        parser.add_argument(
            "--no-parallel",
            action="store_true",
            help="Disable parallel execution"
        )
        parser.add_argument(
            "--workers",
            type=int,
            help="Number of parallel workers"
        )
        parser.add_argument(
            "--fail-fast",
            action="store_true",
            help="Stop on first failure"
        )
        parser.add_argument(
            "--verbose",
            "-v",
            action="store_true",
            help="Verbose output"
        )

        # Coverage options
        parser.add_argument(
            "--coverage",
            action="store_true",
            help="Enable coverage collection"
        )
        parser.add_argument(
            "--no-coverage",
            action="store_true",
            help="Disable coverage collection"
        )
        parser.add_argument(
            "--coverage-threshold",
            type=float,
            help="Minimum coverage percentage required"
        )

        # Reporting options
        parser.add_argument(
            "--report",
            choices=[fmt.value for fmt in ReportFormat],
            nargs="+",
            default=["console"],
            help="Report formats to generate"
        )
        parser.add_argument(
            "--report-dir",
            type=Path,
            help="Directory for report output"
        )

        # Retry options
        parser.add_argument(
            "--retry-failed",
            action="store_true",
            help="Retry failed tests"
        )
        parser.add_argument(
            "--max-retries",
            type=int,
            help="Maximum number of retries for failed tests"
        )

        # Timeout options
        parser.add_argument(
            "--timeout",
            type=float,
            help="Test timeout in seconds"
        )

        # Environment options
        parser.add_argument(
            "--env",
            action="append",
            help="Set environment variable (KEY=VALUE)"
        )
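
    # Note: "--env" uses action="append", so it may be repeated; for example
    #     --env API_KEY=test --env DEBUG=1
    # arrives as args.env == ["API_KEY=test", "DEBUG=1"] and is split into
    # key/value pairs by _override_config_from_args below (the variable names
    # here are purely illustrative).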

    async def run(self, argv: Optional[List[str]] = None) -> int:
        """
        Run the CLI with the given arguments.

        Args:
            argv: Command line arguments (uses sys.argv if None)

        Returns:
            Exit code (0 for success, non-zero for failure)
        """
        args = self.parser.parse_args(argv)
        if not args.command:
            self.parser.print_help()
            return 1

        try:
            # Setup logging
            logging.basicConfig(
                level=getattr(logging, args.log_level),
                format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )

            # Load configuration
            config = self._load_config(args)

            # Override config with command line arguments
            self._override_config_from_args(config, args)

            # Validate configuration
            issues = config.validate()
            if issues:
                print("Configuration validation failed:")
                for issue in issues:
                    print(f"  - {issue}")
                return 1

            # Execute command
            return await self._execute_command(args, config)
        except KeyboardInterrupt:
            print("\nInterrupted by user")
            return 130
        except Exception as e:
            print(f"Error: {e}")
            if args.log_level == "DEBUG":
                import traceback
                traceback.print_exc()
            return 1

    def _load_config(self, args: argparse.Namespace) -> TestConfig:
        """Load configuration from file or defaults."""
        if args.config and args.config.exists():
            # Load from specific config file
            config = TestConfig()
            config._load_from_file(args.config)
            return config
        else:
            # Load default configuration
            project_root = args.project_root or Path.cwd()
            return TestConfig.load_default(project_root)
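
    # A generated config file (see the "config generate" subcommand, which
    # defaults to test_config.json) is expected to be JSON. A plausible shape,
    # using only the fields this CLI touches, might look like the sketch below;
    # field names match this module, values are illustrative, and the actual
    # schema lives in TestConfig:
    #
    #   {
    #     "parallel_execution": true,
    #     "max_workers": 4,
    #     "enable_coverage": true,
    #     "coverage_threshold": 80.0,
    #     "test_timeout": 300.0,
    #     "fail_fast": false,
    #     "verbose": false,
    #     "report_formats": ["console", "html"],
    #     "report_directory": "test_reports",
    #     "retry_failed_tests": false,
    #     "max_retries": 2,
    #     "test_env_vars": {},
    #     "log_level": "INFO"
    #   }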

    def _override_config_from_args(self, config: TestConfig, args: argparse.Namespace) -> None:
        """Override configuration with command line arguments."""
        # Execution options
        if hasattr(args, 'parallel') and args.parallel:
            config.parallel_execution = True
        if hasattr(args, 'no_parallel') and args.no_parallel:
            config.parallel_execution = False
        if hasattr(args, 'workers') and args.workers:
            config.max_workers = args.workers
        if hasattr(args, 'fail_fast') and args.fail_fast:
            config.fail_fast = True
        if hasattr(args, 'verbose') and args.verbose:
            config.verbose = True

        # Coverage options
        if hasattr(args, 'coverage') and args.coverage:
            config.enable_coverage = True
        if hasattr(args, 'no_coverage') and args.no_coverage:
            config.enable_coverage = False
        if hasattr(args, 'coverage_threshold') and args.coverage_threshold:
            config.coverage_threshold = args.coverage_threshold

        # Report options
        if hasattr(args, 'report') and args.report:
            config.report_formats = args.report
        if hasattr(args, 'report_dir') and args.report_dir:
            config.report_directory = str(args.report_dir)

        # Retry options
        if hasattr(args, 'retry_failed') and args.retry_failed:
            config.retry_failed_tests = True
        if hasattr(args, 'max_retries') and args.max_retries:
            config.max_retries = args.max_retries

        # Timeout
        if hasattr(args, 'timeout') and args.timeout:
            config.test_timeout = args.timeout

        # Environment variables
        if hasattr(args, 'env') and args.env:
            for env_setting in args.env:
                if '=' in env_setting:
                    key, value = env_setting.split('=', 1)
                    config.test_env_vars[key] = value

        # Logging
        config.log_level = args.log_level
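
    # Precedence note: run() applies _load_config first and then this method,
    # so command-line flags win over the config file. For example,
    #     run-unit --no-parallel --workers 2
    # leaves parallel_execution False and max_workers 2 regardless of what the
    # file specified.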

    async def _execute_command(self, args: argparse.Namespace, config: TestConfig) -> int:
        """Execute the specified command."""
        if args.command.startswith("run-"):
            return await self._execute_run_command(args, config)
        elif args.command == "list":
            return await self._execute_list_command(args, config)
        elif args.command == "report":
            return await self._execute_report_command(args, config)
        elif args.command == "trends":
            return await self._execute_trends_command(args, config)
        elif args.command == "config":
            return await self._execute_config_command(args, config)
        else:
            print(f"Unknown command: {args.command}")
            return 1

    async def _execute_run_command(self, args: argparse.Namespace, config: TestConfig) -> int:
        """Execute a test run command."""
        # Map command to mode
        mode_mapping = {
            "run-all": TestRunnerMode.ALL,
            "run-unit": TestRunnerMode.UNIT,
            "run-integration": TestRunnerMode.INTEGRATION,
            "run-frontend": TestRunnerMode.FRONTEND,
            "run-e2e": TestRunnerMode.E2E,
            "run-performance": TestRunnerMode.PERFORMANCE,
            "run-specific": TestRunnerMode.SPECIFIC,
            "run-changed": TestRunnerMode.CHANGED
        }
        mode = mode_mapping[args.command]

        # Setup test runner
        project_root = args.project_root or Path.cwd()
        runner = TestRunner(config, project_root)

        # Get test patterns for specific mode
        test_patterns = None
        if mode == TestRunnerMode.SPECIFIC:
            test_patterns = args.patterns

        # Prepare report formats
        report_formats = [ReportFormat(fmt) for fmt in config.report_formats]

        print(f"Starting test run in {mode.value} mode...")

        # Execute tests
        result = await runner.run_tests(
            mode=mode,
            test_patterns=test_patterns,
            parallel=config.parallel_execution,
            coverage=config.enable_coverage,
            reports=report_formats,
            fail_fast=config.fail_fast,
            verbose=config.verbose
        )

        # Return appropriate exit code
        return 0 if result.success else 1

    async def _execute_list_command(self, args: argparse.Namespace, config: TestConfig) -> int:
        """Execute list tests command."""
        project_root = args.project_root or Path.cwd()
        runner = TestRunner(config, project_root)

        # Get category filter
        category_filter = None
        if args.category:
            category_filter = TestCategory(args.category)

        # List tests
        test_lists = await runner.list_tests(category_filter)
        if not test_lists:
            print("No tests found.")
            return 0

        # Display results
        for category, tests in test_lists.items():
            print(f"\n{category.value.upper()} Tests ({len(tests)}):")
            print("=" * (len(category.value) + 12))
            for test in tests:
                if args.detailed:
                    # Show more detailed information
                    print(f"{test}")
                else:
                    print(f"  {test}")

        total_tests = sum(len(tests) for tests in test_lists.values())
        print(f"\nTotal: {total_tests} tests across {len(test_lists)} categories")
        return 0

    async def _execute_report_command(self, args: argparse.Namespace, config: TestConfig) -> int:
        """Execute report generation command."""
        # This would load previous test results and generate reports.
        # For now, just show a message about the functionality.
        print("Report generation from previous results not yet implemented.")
        print("Reports are automatically generated during test runs.")
        if args.input:
            print(f"Would process results from: {args.input}")
        if args.output:
            print(f"Would output to: {args.output}")
        print(f"Would generate formats: {', '.join(args.format)}")
        return 0

    async def _execute_trends_command(self, args: argparse.Namespace, config: TestConfig) -> int:
        """Execute trends analysis command."""
        from test_runner.core.reporting import TestReporter

        project_root = args.project_root or Path.cwd()
        reporter = TestReporter(config)
        trends = reporter.get_test_trends(args.days)

        if "error" in trends:
            print(f"Error: {trends['error']}")
            return 1

        if args.format == "json":
            import json
            print(json.dumps(trends, indent=2, default=str))
        else:
            # Console format
            print(f"Test Trends (Last {args.days} days)")
            print("=" * 30)
            print(f"Total Runs: {trends['total_runs']}")
            print(f"Success Rate: {trends['success_rate']:.1f}%")
            print(f"Average Execution Time: {trends['average_execution_time']:.2f}s")
            print(f"Average Pass Rate: {trends['average_pass_rate']:.1f}%")

            if trends['most_recent']:
                recent = trends['most_recent']
                print("\nMost Recent Run:")
                print(f"  Time: {recent['timestamp']}")
                print(f"  Success: {'✅' if recent['summary']['success'] else '❌'}")
                print(f"  Tests: {recent['summary']['total_tests']}")
                print(f"  Pass Rate: {recent['summary']['pass_rate']:.1f}%")

        return 0
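
    # For reference, the console output of "trends --days 7" produced by the
    # prints above looks roughly like this (numbers and timestamp illustrative):
    #
    #   Test Trends (Last 7 days)
    #   ==============================
    #   Total Runs: 12
    #   Success Rate: 91.7%
    #   Average Execution Time: 84.30s
    #   Average Pass Rate: 97.2%
    #
    #   Most Recent Run:
    #     Time: 2024-01-01T12:00:00
    #     Success: ✅
    #     Tests: 248
    #     Pass Rate: 98.8%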

    async def _execute_config_command(self, args: argparse.Namespace, config: TestConfig) -> int:
        """Execute configuration management command."""
        if args.config_action == "show":
            print("Current Configuration:")
            print("=" * 20)
            print(f"Parallel Execution: {config.parallel_execution}")
            print(f"Max Workers: {config.max_workers}")
            print(f"Coverage Enabled: {config.enable_coverage}")
            print(f"Coverage Threshold: {config.coverage_threshold}%")
            print(f"Test Timeout: {config.test_timeout}s")
            print(f"Fail Fast: {config.fail_fast}")
            print(f"Verbose: {config.verbose}")
            print(f"Report Formats: {', '.join(config.report_formats)}")
            print(f"Report Directory: {config.report_directory}")
        elif args.config_action == "validate":
            issues = config.validate()
            if issues:
                print("Configuration Issues:")
                for issue in issues:
                    print(f"  - {issue}")
                return 1
            else:
                print("✅ Configuration is valid")
        elif args.config_action == "generate":
            config.save_to_file(args.output)
            print(f"✅ Configuration saved to: {args.output}")

        return 0


def main():
    """Main CLI entry point."""
    cli = TestRunnerCLI()
    exit_code = asyncio.run(cli.run())
    sys.exit(exit_code)


if __name__ == "__main__":
    main()
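
# Programmatic use (comment sketch mirroring main() above): the CLI can also be
# driven from other code by passing an explicit argv list, e.g.
#
#   from test_runner.cli import TestRunnerCLI
#   exit_code = asyncio.run(TestRunnerCLI().run(["run-unit", "--coverage"]))
#
# The import path assumes the same package layout as the imports at the top of
# this module.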