"""
|
|
Test Configuration Management
|
|
|
|
Centralized configuration management for the test runner with support for
|
|
different environments, custom settings, and configuration validation.
|
|
"""
|
|
|
|
import json
|
|
import logging
|
|
import os
|
|
from dataclasses import dataclass, field
|
|
from pathlib import Path
|
|
from typing import Dict, List, Optional, Any, Union
|
|
import yaml
|
|
|
|
|
|
@dataclass
class TestConfig:
    """Test runner configuration."""

    # Execution settings
    parallel_execution: bool = True
    max_workers: int = 4
    fail_fast: bool = False
    verbose: bool = False

    # Coverage settings
    enable_coverage: bool = True
    coverage_threshold: float = 80.0
    coverage_sources: List[str] = field(default_factory=lambda: ["backend"])
    coverage_omit: List[str] = field(default_factory=lambda: ["*/tests/*", "*/test_*", "*/__pycache__/*"])

    # Test discovery
    test_patterns: List[str] = field(default_factory=lambda: ["test_*.py", "*_test.py"])
    ignore_patterns: List[str] = field(default_factory=lambda: ["__pycache__", "*.pyc"])

    # Database settings
    test_database_url: str = "sqlite:///:memory:"
    use_transactions: bool = True
    auto_cleanup: bool = True

    # Timeout settings
    test_timeout: float = 300.0  # 5 minutes
    setup_timeout: float = 60.0  # 1 minute
    teardown_timeout: float = 30.0  # 30 seconds

    # Reporting settings
    report_formats: List[str] = field(default_factory=lambda: ["console", "json"])
    report_directory: str = "test_reports"
    keep_reports: int = 10  # Number of reports to keep

    # Environment settings
    test_env_vars: Dict[str, str] = field(default_factory=dict)
    required_env_vars: List[str] = field(default_factory=list)

    # Retry settings
    retry_failed_tests: bool = True
    max_retries: int = 2
    retry_delay: float = 1.0

    # Logging
    log_level: str = "INFO"
    log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

    # Performance settings
    performance_tracking: bool = True
    memory_profiling: bool = False
    benchmark_mode: bool = False

    # Integration settings
    mock_external_apis: bool = True
    network_timeout: float = 30.0

    # Frontend testing
    frontend_test_command: str = "npm test"
    frontend_test_timeout: float = 120.0

    @classmethod
    def load_default(cls, project_root: Path) -> 'TestConfig':
        """
        Load default configuration with project-specific overrides.

        Args:
            project_root: Project root directory

        Returns:
            TestConfig instance with loaded configuration
        """
        config = cls()

        # Look for configuration files; the first one that loads successfully wins
        config_files = [
            project_root / "pytest.ini",
            project_root / "pyproject.toml",
            project_root / "test_config.json",
            project_root / "test_config.yaml",
            project_root / ".test_runner_config.json"
        ]

        for config_file in config_files:
            if config_file.exists():
                try:
                    config._load_from_file(config_file)
                    break
                except Exception as e:
                    logging.warning(f"Failed to load config from {config_file}: {e}")

        # Apply environment variable overrides
        config._apply_env_overrides()

        return config

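    # A minimal .test_runner_config.json that load_default would pick up
    # (keys mirror the dataclass fields above; values are illustrative):
    #
    #     {
    #         "parallel_execution": false,
    #         "max_workers": 1,
    #         "coverage_threshold": 90.0,
    #         "report_formats": ["console", "html"]
    #     }
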
    def _load_from_file(self, config_file: Path) -> None:
        """Load configuration from a file."""
        if config_file.name == "pytest.ini":
            self._load_from_pytest_ini(config_file)
        elif config_file.name == "pyproject.toml":
            self._load_from_pyproject_toml(config_file)
        elif config_file.suffix == ".json":
            self._load_from_json(config_file)
        elif config_file.suffix in [".yaml", ".yml"]:
            self._load_from_yaml(config_file)

    def _load_from_pytest_ini(self, config_file: Path) -> None:
        """Load configuration from a pytest.ini file."""
        import configparser

        config = configparser.ConfigParser()
        config.read(config_file)

        # pytest.ini uses a [pytest] section; [tool:pytest] is the setup.cfg
        # spelling, accepted here for robustness
        section_name = "pytest" if "pytest" in config else "tool:pytest"
        if section_name in config:
            pytest_config = config[section_name]

            # Test discovery
            if "testpaths" in pytest_config:
                # This would affect test discovery logic
                pass

            # Markers and options
            if "markers" in pytest_config:
                # Handle custom markers
                pass

    def _load_from_pyproject_toml(self, config_file: Path) -> None:
        """Load configuration from a pyproject.toml file."""
        try:
            import tomllib  # Standard library on Python 3.11+
        except ImportError:
            try:
                import tomli as tomllib  # Backport for older interpreters
            except ImportError:
                logging.warning("TOML support not available. Install the tomli package.")
                return

        with open(config_file, "rb") as f:
            data = tomllib.load(f)

        # Look for test runner configuration
        if "tool" in data and "test_runner" in data["tool"]:
            self._update_from_dict(data["tool"]["test_runner"])

        # Also check for pytest configuration; in pyproject.toml pytest keeps
        # its settings under [tool.pytest.ini_options]
        pytest_tool = data.get("tool", {}).get("pytest", {})
        pytest_config = pytest_tool.get("ini_options", pytest_tool)
        if pytest_config:
            self._apply_pytest_config(pytest_config)

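    # A hypothetical [tool.test_runner] stanza this method would consume
    # (keys mirror the dataclass fields; values are illustrative):
    #
    #     [tool.test_runner]
    #     parallel_execution = true
    #     coverage_threshold = 85.0
    #     report_formats = ["console", "junit"]
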
    def _load_from_json(self, config_file: Path) -> None:
        """Load configuration from a JSON file."""
        with open(config_file, "r") as f:
            data = json.load(f)

        self._update_from_dict(data)

    def _load_from_yaml(self, config_file: Path) -> None:
        """Load configuration from a YAML file."""
        if yaml is None:
            logging.warning("YAML support not available. Install the PyYAML package.")
            return

        try:
            with open(config_file, "r") as f:
                data = yaml.safe_load(f)

            if data:
                self._update_from_dict(data)
        except Exception as e:
            logging.warning(f"Failed to load YAML config: {e}")

    def _update_from_dict(self, data: Dict[str, Any]) -> None:
        """Update configuration from dictionary data."""
        for key, value in data.items():
            if hasattr(self, key):
                # Only accept values whose type matches the current attribute
                current_value = getattr(self, key)
                if current_value is None or isinstance(value, type(current_value)):
                    setattr(self, key, value)
                else:
                    logging.warning(f"Type mismatch for config key '{key}': "
                                    f"expected {type(current_value)}, got {type(value)}")

    def _apply_pytest_config(self, pytest_config: Dict[str, Any]) -> None:
        """Apply pytest-specific configuration."""
        # Map pytest options to test runner config
        if "timeout" in pytest_config:
            self.test_timeout = float(pytest_config["timeout"])

        if pytest_config.get("maxfail") == 1:
            self.fail_fast = True

        # addopts is a string in ini files but may be a list in pyproject.toml
        addopts = pytest_config.get("addopts", "")
        if isinstance(addopts, list):
            addopts = " ".join(addopts)

        if "verbose" in pytest_config or "-v" in addopts:
            self.verbose = True

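    # For example, a section containing `timeout = 60` and `addopts = -v`
    # (hypothetical values) would set test_timeout=60.0 and verbose=True here.
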
    def _apply_env_overrides(self) -> None:
        """Apply environment variable overrides."""
        env_mappings = {
            "TEST_PARALLEL": ("parallel_execution", bool),
            "TEST_MAX_WORKERS": ("max_workers", int),
            "TEST_FAIL_FAST": ("fail_fast", bool),
            "TEST_VERBOSE": ("verbose", bool),
            "TEST_COVERAGE": ("enable_coverage", bool),
            "TEST_COVERAGE_THRESHOLD": ("coverage_threshold", float),
            "TEST_DATABASE_URL": ("test_database_url", str),
            "TEST_TIMEOUT": ("test_timeout", float),
            "TEST_LOG_LEVEL": ("log_level", str),
            "TEST_RETRY_FAILED": ("retry_failed_tests", bool),
            "TEST_MAX_RETRIES": ("max_retries", int),
            "TEST_MOCK_APIS": ("mock_external_apis", bool)
        }

        for env_var, (attr_name, attr_type) in env_mappings.items():
            env_value = os.environ.get(env_var)
            if env_value is not None:
                try:
                    if attr_type is bool:
                        value = env_value.lower() in ("true", "1", "yes", "on")
                    else:
                        value = attr_type(env_value)

                    setattr(self, attr_name, value)
                except (ValueError, TypeError) as e:
                    logging.warning(f"Invalid value for {env_var}: {env_value} ({e})")

        # Add custom test environment variables (TEST_ENV_ prefix stripped)
        prefix = "TEST_ENV_"
        for key, value in os.environ.items():
            if key.startswith(prefix):
                self.test_env_vars[key[len(prefix):]] = value

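    # For example, running with TEST_MAX_WORKERS=8 TEST_FAIL_FAST=true
    # TEST_ENV_API_KEY=dummy (illustrative values) yields max_workers=8,
    # fail_fast=True, and test_env_vars == {"API_KEY": "dummy"}.
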
    def validate(self) -> List[str]:
        """
        Validate the configuration and return a list of issues.

        Returns:
            List of validation error messages
        """
        issues = []

        # Validate numeric ranges
        if self.max_workers < 1:
            issues.append("max_workers must be at least 1")

        if not (0.0 <= self.coverage_threshold <= 100.0):
            issues.append("coverage_threshold must be between 0 and 100")

        if self.test_timeout <= 0:
            issues.append("test_timeout must be positive")

        if self.max_retries < 0:
            issues.append("max_retries cannot be negative")

        # Validate paths (creates the report directory if it does not exist)
        report_dir = Path(self.report_directory)
        try:
            report_dir.mkdir(parents=True, exist_ok=True)
        except Exception:
            issues.append(f"Cannot create report directory: {self.report_directory}")

        # Validate log level
        valid_log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
        if self.log_level.upper() not in valid_log_levels:
            issues.append(f"log_level must be one of: {valid_log_levels}")

        # Validate report formats
        valid_formats = ["console", "html", "json", "junit", "markdown", "csv"]
        for fmt in self.report_formats:
            if fmt not in valid_formats:
                issues.append(f"Invalid report format: {fmt}. Valid formats: {valid_formats}")

        # Check required environment variables
        for env_var in self.required_env_vars:
            if env_var not in os.environ:
                issues.append(f"Required environment variable missing: {env_var}")

        return issues

    def save_to_file(self, file_path: Path) -> None:
        """
        Save configuration to a file.

        Args:
            file_path: Path to save configuration to
        """
        # Convert to a dictionary, excluding callables and private attributes
        config_dict = {}
        for key, value in self.__dict__.items():
            if not key.startswith('_') and not callable(value):
                config_dict[key] = value

        if file_path.suffix == ".json":
            with open(file_path, "w") as f:
                json.dump(config_dict, f, indent=2, default=str)
        elif file_path.suffix in [".yaml", ".yml"]:
            if yaml is None:
                raise RuntimeError("YAML support not available. Install the PyYAML package.")
            with open(file_path, "w") as f:
                yaml.dump(config_dict, f, default_flow_style=False)
        else:
            raise ValueError(f"Unsupported file format: {file_path.suffix}")

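    # Round-trip sketch: cfg.save_to_file(Path("test_config.json")) writes a
    # file that a later load_default() call picks back up via _load_from_json;
    # YAML behaves the same way when PyYAML is installed.
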
    def setup_logging(self) -> None:
        """Set up logging based on configuration."""
        # Get the numeric log level
        numeric_level = getattr(logging, self.log_level.upper(), logging.INFO)

        # Configure the root logger
        logging.basicConfig(
            level=numeric_level,
            format=self.log_format,
            force=True  # Override existing configuration
        )

        # Set specific logger levels
        loggers_to_configure = [
            "TestRunner",
            "TestDiscovery",
            "TestExecutor",
            "TestReporter"
        ]

        for logger_name in loggers_to_configure:
            logging.getLogger(logger_name).setLevel(numeric_level)

    def get_environment_config(self) -> Dict[str, str]:
        """
        Get environment configuration for test execution.

        Returns:
            Dictionary of environment variables for test processes
        """
        env_config = os.environ.copy()

        # Add test-specific environment variables
        env_config.update({
            "TESTING": "true",
            "TEST_DATABASE_URL": self.test_database_url,
            "TEST_TIMEOUT": str(self.test_timeout),
            "MOCK_EXTERNAL_APIS": str(self.mock_external_apis).lower(),
            "NETWORK_TIMEOUT": str(self.network_timeout),
        })

        # Add custom test environment variables
        env_config.update(self.test_env_vars)

        return env_config

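    # Usage sketch (the actual runner invocation lives elsewhere): pass the
    # result straight to a test subprocess, e.g.
    # subprocess.run(cmd, env=config.get_environment_config()).
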
    def to_pytest_args(self) -> List[str]:
        """
        Convert configuration to pytest command line arguments.

        Note: parallel execution requires the pytest-xdist plugin, coverage
        requires pytest-cov, and the timeout flag requires pytest-timeout.

        Returns:
            List of pytest command line arguments
        """
        args = []

        if self.verbose:
            args.append("-v")
        else:
            args.append("-q")

        if self.fail_fast:
            args.append("-x")

        if self.parallel_execution and self.max_workers > 1:
            args.extend(["-n", str(self.max_workers)])

        if self.enable_coverage:
            for source in self.coverage_sources:
                args.extend(["--cov", source])

            args.append("--cov-report=term-missing")

            if self.coverage_threshold > 0:
                args.append(f"--cov-fail-under={self.coverage_threshold}")

        # Add timeout
        args.extend(["--timeout", str(int(self.test_timeout))])

        return args

    def __str__(self) -> str:
        """String representation of the configuration."""
        return (f"TestConfig(parallel={self.parallel_execution}, "
                f"coverage={self.enable_coverage}, "
                f"workers={self.max_workers}, "
                f"timeout={self.test_timeout}s)")