#!/usr/bin/env python3
"""
Test Setup Validation Script

Validates that the test runner environment is correctly configured
and all components are working properly.
"""

import asyncio
import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict

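
# NOTE: the validator resolves the project root from the current working
# directory (Path.cwd() in __init__ below), so the script is expected to be
# run from the repository root; running it from a subdirectory will make the
# structure checks fail.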
class TestSetupValidator:
    """Validates test runner setup and configuration."""

    def __init__(self):
        self.project_root = Path.cwd()
        self.issues = []
        self.warnings = []
        self.successes = []

    def add_success(self, message: str):
        """Add success message."""
        self.successes.append(f"✅ {message}")

    def add_warning(self, message: str):
        """Add warning message."""
        self.warnings.append(f"⚠️ {message}")

    def add_issue(self, message: str):
        """Add critical issue."""
        self.issues.append(f"❌ {message}")

    def validate_project_structure(self) -> bool:
        """Validate project directory structure."""
        print("🔍 Validating project structure...")

        required_files = [
            "backend/main.py",
            "backend/test_runner/__init__.py",
            "backend/test_runner/cli.py",
            "backend/test_runner/core/test_runner.py",
            "pytest.ini",
            ".coveragerc"
        ]

        required_dirs = [
            "backend/test_runner",
            "backend/test_runner/core",
            "backend/test_runner/config",
            "backend/test_runner/utils",
            "backend/tests",
            "test_reports"
        ]

        success = True

        # Check files
        for file_path in required_files:
            full_path = self.project_root / file_path
            if full_path.exists():
                self.add_success(f"Found required file: {file_path}")
            else:
                self.add_issue(f"Missing required file: {file_path}")
                success = False

        # Check directories
        for dir_path in required_dirs:
            full_path = self.project_root / dir_path
            if full_path.exists() and full_path.is_dir():
                self.add_success(f"Found required directory: {dir_path}")
            else:
                self.add_issue(f"Missing required directory: {dir_path}")
                success = False

        return success

    def validate_python_environment(self) -> bool:
        """Validate Python version and virtual environment."""
        print("🐍 Validating Python environment...")

        # Check Python version
        version = sys.version_info
        if version.major == 3 and version.minor >= 11:
            self.add_success(f"Python version: {version.major}.{version.minor}.{version.micro}")
        else:
            self.add_warning(f"Python {version.major}.{version.minor} found, 3.11+ recommended")

        # Check virtual environment
        if hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix):
            self.add_success("Virtual environment active")
        else:
            self.add_warning("Virtual environment not detected")

        return True

    def validate_dependencies(self) -> bool:
        """Validate required Python packages."""
        print("📦 Validating dependencies...")

        required_packages = [
            "pytest",
            "pytest-asyncio",
            "pytest-cov",
            "pytest-xdist",
            "pytest-timeout",
            "pytest-mock",
            "httpx",
            "fastapi",
            "uvicorn"
        ]

        # Some distributions are imported under a module name that differs from
        # the PyPI name (pytest-xdist installs the `xdist` module), so map those
        # explicitly instead of assuming '-' -> '_' always works.
        import_names = {"pytest-xdist": "xdist"}

        success = True

        for package in required_packages:
            module_name = import_names.get(package, package.replace('-', '_'))
            try:
                __import__(module_name)
                self.add_success(f"Package available: {package}")
            except ImportError:
                self.add_issue(f"Missing package: {package}")
                success = False

        return success

    def validate_test_runner_import(self) -> bool:
        """Validate test runner module import."""
        print("🔧 Validating test runner import...")

        try:
            # Test basic imports
            from backend.test_runner.cli import TestCLI
            from backend.test_runner.core.test_runner import TestRunner
            from backend.test_runner.config.test_config import TestConfig

            self.add_success("Test runner modules import successfully")

            # Test configuration loading
            config = TestConfig.load_default(self.project_root)
            self.add_success("Test configuration loads successfully")

            return True

        except ImportError as e:
            self.add_issue(f"Failed to import test runner: {e}")
            return False
        except Exception as e:
            self.add_issue(f"Test runner error: {e}")
            return False

    def validate_pytest_configuration(self) -> bool:
        """Validate pytest configuration."""
        print("⚙️ Validating pytest configuration...")

        # Check pytest.ini
        pytest_ini = self.project_root / "pytest.ini"
        if pytest_ini.exists():
            self.add_success("pytest.ini configuration found")

            content = pytest_ini.read_text()
            if "testpaths" in content:
                self.add_success("Test paths configured")
            else:
                self.add_warning("Test paths not configured in pytest.ini")

            if "markers" in content:
                self.add_success("Test markers configured")
            else:
                self.add_warning("Test markers not configured")

        else:
            self.add_issue("pytest.ini configuration missing")
            return False

        # Check .coveragerc
        coveragerc = self.project_root / ".coveragerc"
        if coveragerc.exists():
            self.add_success("Coverage configuration found")
        else:
            self.add_warning("Coverage configuration missing")

        return True

    def validate_test_discovery(self) -> bool:
        """Validate test discovery."""
        print("🔎 Validating test discovery...")

        try:
            # Run test discovery
            result = subprocess.run([
                sys.executable, "-m", "pytest", "--collect-only", "-q"
            ], cwd=self.project_root, capture_output=True, text=True, timeout=30)

            if result.returncode == 0:
                # Parse output to count tests. Depending on pytest version and
                # verbosity, the summary line may read "collected 45 items" or
                # "45 tests collected in 0.12s", so take the first integer on
                # any line that mentions "collected".
                test_count = 0
                for line in result.stdout.split('\n'):
                    if 'collected' not in line:
                        continue
                    for token in line.split():
                        if token.isdigit():
                            test_count = int(token)
                            break
                    if test_count:
                        break

                if test_count > 0:
                    self.add_success(f"Discovered {test_count} tests")
                else:
                    self.add_warning("No tests discovered")

            else:
                self.add_issue(f"Test discovery failed: {result.stderr}")
                return False

        except subprocess.TimeoutExpired:
            self.add_issue("Test discovery timed out")
            return False
        except Exception as e:
            self.add_issue(f"Test discovery error: {e}")
            return False

        return True

    def validate_sample_test_execution(self) -> bool:
        """Validate sample test execution."""
        print("🧪 Validating sample test execution...")

        try:
            # Run a simple test
            result = subprocess.run([
                sys.executable, "-m", "pytest",
                "backend/tests/unit/test_youtube_service.py::TestYouTubeService::test_extract_video_id",
                "-v", "--tb=short"
            ], cwd=self.project_root, capture_output=True, text=True, timeout=60)

            if result.returncode == 0:
                self.add_success("Sample test execution successful")
                return True
            else:
                self.add_warning(f"Sample test failed (this may be expected): {result.stderr}")
                return True  # Don't fail validation for this

        except subprocess.TimeoutExpired:
            self.add_warning("Sample test execution timed out")
            return True
        except Exception as e:
            self.add_warning(f"Sample test execution error: {e}")
            return True

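    # NOTE: the CLI check below invokes `python -m backend.test_runner`, which
    # presumes that backend/test_runner/ provides a __main__.py entry point;
    # that file is not covered by the structure check above.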
    def validate_test_runner_cli(self) -> bool:
        """Validate test runner CLI."""
        print("💻 Validating test runner CLI...")

        try:
            # Test CLI help
            result = subprocess.run([
                sys.executable, "-m", "backend.test_runner", "--help"
            ], cwd=self.project_root, capture_output=True, text=True, timeout=30)

            if result.returncode == 0:
                self.add_success("Test runner CLI responds to --help")

                # Check for expected commands
                if "run-all" in result.stdout:
                    self.add_success("run-all command available")
                else:
                    self.add_warning("run-all command not found in help")

                if "run-unit" in result.stdout:
                    self.add_success("run-unit command available")
                else:
                    self.add_warning("run-unit command not found in help")

                return True
            else:
                self.add_issue(f"Test runner CLI failed: {result.stderr}")
                return False

        except subprocess.TimeoutExpired:
            self.add_issue("Test runner CLI timed out")
            return False
        except Exception as e:
            self.add_issue(f"Test runner CLI error: {e}")
            return False

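    # NOTE: the executable-bit check below (os.access with os.X_OK) is only
    # meaningful on POSIX systems; on Windows it generally mirrors a plain
    # existence check rather than a real execute permission.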
    def validate_convenience_scripts(self) -> bool:
        """Validate convenience scripts."""
        print("📜 Validating convenience scripts...")

        scripts = [
            "scripts/setup_test_env.sh",
            "run_tests.sh"
        ]

        for script in scripts:
            script_path = self.project_root / script
            if script_path.exists():
                if os.access(script_path, os.X_OK):
                    self.add_success(f"Script executable: {script}")
                else:
                    self.add_warning(f"Script not executable: {script}")
            else:
                self.add_warning(f"Script missing: {script}")

        return True

    async def run_validation(self) -> Dict[str, Any]:
        """Run complete validation suite."""
        print("🚀 Starting test setup validation...\n")

        # Run all validation checks
        validations = [
            ("Project Structure", self.validate_project_structure),
            ("Python Environment", self.validate_python_environment),
            ("Dependencies", self.validate_dependencies),
            ("Test Runner Import", self.validate_test_runner_import),
            ("pytest Configuration", self.validate_pytest_configuration),
            ("Test Discovery", self.validate_test_discovery),
            ("Sample Test Execution", self.validate_sample_test_execution),
            ("Test Runner CLI", self.validate_test_runner_cli),
            ("Convenience Scripts", self.validate_convenience_scripts)
        ]

        results = {}
        overall_success = True

        for name, validator in validations:
            try:
                success = validator()
                results[name] = success
                if not success:
                    overall_success = False
            except Exception as e:
                self.add_issue(f"{name} validation failed with exception: {e}")
                results[name] = False
                overall_success = False
            print()  # Add spacing between sections

        return {
            "overall_success": overall_success,
            "results": results,
            "successes": self.successes,
            "warnings": self.warnings,
            "issues": self.issues
        }

    def print_summary(self, validation_results: Dict[str, Any]):
        """Print validation summary."""
        print("=" * 60)
        print("🏁 VALIDATION SUMMARY")
        print("=" * 60)

        # Print successes
        if self.successes:
            print(f"\n✅ SUCCESSES ({len(self.successes)}):")
            for success in self.successes:
                print(f"   {success}")

        # Print warnings
        if self.warnings:
            print(f"\n⚠️ WARNINGS ({len(self.warnings)}):")
            for warning in self.warnings:
                print(f"   {warning}")

        # Print issues
        if self.issues:
            print(f"\n❌ ISSUES ({len(self.issues)}):")
            for issue in self.issues:
                print(f"   {issue}")

        # Overall status
        print("\n🎯 OVERALL STATUS:")
        if validation_results["overall_success"]:
            print("   ✅ Test runner setup is VALID and ready to use!")
            print("   🚀 Run './run_tests.sh run-all' to start testing")
        else:
            print("   ❌ Test runner setup has ISSUES that need attention")
            print("   🔧 Please resolve the issues above before proceeding")

        print("\n📊 STATISTICS:")
        print(f"   ✅ Successes: {len(self.successes)}")
        print(f"   ⚠️ Warnings: {len(self.warnings)}")
        print(f"   ❌ Issues: {len(self.issues)}")

        print("=" * 60)


async def main():
    """Main validation entry point."""
    validator = TestSetupValidator()

    try:
        results = await validator.run_validation()
        validator.print_summary(results)

        # Exit with appropriate code
        if results["overall_success"]:
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        print("\n⛔ Validation interrupted by user")
        return 1
    except Exception as e:
        print(f"\n💥 Validation failed with error: {e}")
        return 1


if __name__ == "__main__":
    sys.exit(asyncio.run(main()))