"""
Test Runner Data Models

Core data structures and enums used throughout the test runner.
"""
|
|
|
|
from dataclasses import dataclass, field
|
|
from enum import Enum
|
|
from pathlib import Path
|
|
from typing import List, Dict, Optional, Any, Set
|
|
import datetime
|
|
|
|
|
|
class TestRunnerMode(Enum):
    """Test execution modes.

    Selects which subset of the test suite a run covers; the string value
    is the mode's serialized identifier (presumably the CLI/config token —
    TODO confirm against the runner's argument parsing).
    """

    ALL = "all"                    # every category
    UNIT = "unit"
    INTEGRATION = "integration"
    API = "api"
    FRONTEND = "frontend"
    E2E = "e2e"
    PERFORMANCE = "performance"
    COVERAGE = "coverage"          # presumably a run with coverage collection — confirm
    SPECIFIC = "specific"          # presumably explicitly named tests — confirm
    CHANGED = "changed"            # presumably tests affected by recent changes — confirm
|
|
|
|
|
|
class TestCategory(Enum):
    """Test categories for classification.

    Used to tag individual tests (``TestInfo.category``) and to group them
    into suites (``TestSuite.category``).
    """

    UNIT = "unit"
    INTEGRATION = "integration"
    API = "api"
    FRONTEND = "frontend"
    E2E = "e2e"
    PERFORMANCE = "performance"
    DATABASE = "database"
    NETWORK = "network"
    AUTH = "auth"
    PIPELINE = "pipeline"
|
|
|
|
|
|
class TestStatus(Enum):
    """Test execution status.

    Lifecycle states for a test run: typically PENDING -> RUNNING -> one of
    the terminal states (PASSED / FAILED / SKIPPED / ERROR).
    """

    PENDING = "pending"    # not yet started
    RUNNING = "running"    # currently executing
    PASSED = "passed"
    FAILED = "failed"
    SKIPPED = "skipped"
    ERROR = "error"        # presumably an execution error rather than an assertion failure — confirm
|
|
|
|
|
|
class ReportFormat(Enum):
    """Report output formats.

    The string value is the serialized name of the format (see
    ``ReportMetadata.format``).
    """

    CONSOLE = "console"
    HTML = "html"
    JSON = "json"
    JUNIT = "junit"        # JUnit-style XML, the CI-interchange convention — TODO confirm writer
    MARKDOWN = "markdown"
    CSV = "csv"
    TEXT = "text"
|
|
|
|
|
|
@dataclass
class TestInfo:
    """Information about a single test.

    Captures identity (name / file / category), optional location details,
    scheduling hints, and resource-requirement flags. The three
    ``requires_*`` flags are aggregated by ``TestSuite.__post_init__`` into
    the suite's ``setup_requirements`` set.
    """

    name: str                               # identifier/display name of the test
    file_path: Path                         # file that contains the test
    category: TestCategory                  # classification (unit / integration / ...)
    class_name: Optional[str] = None        # enclosing test class, if any
    function_name: Optional[str] = None     # test function name, if any
    markers: List[str] = field(default_factory=list)        # presumably pytest markers — confirm
    dependencies: List[str] = field(default_factory=list)   # dependency names; semantics not visible here — confirm
    estimated_duration: float = 1.0         # duration estimate; assumed seconds — TODO confirm unit
    requires_database: bool = False         # adds "database" to suite setup requirements
    requires_network: bool = False          # adds "network" to suite setup requirements
    requires_auth: bool = False             # adds "auth" to suite setup requirements
    description: Optional[str] = None       # optional human-readable description
|
|
|
|
|
|
@dataclass
class TestSuite:
    """Collection of tests belonging to one category.

    On construction, ``__post_init__`` derives a default name, the total
    estimated duration, and the aggregated setup requirements from the
    contained ``TestInfo`` records.
    """

    category: TestCategory
    name: str = ""
    tests: List[TestInfo] = field(default_factory=list)
    total_estimated_duration: float = 0.0
    setup_requirements: Set[str] = field(default_factory=set)
    setup_commands: List[str] = field(default_factory=list)
    teardown_commands: List[str] = field(default_factory=list)
    environment: Dict[str, str] = field(default_factory=dict)

    def __post_init__(self):
        """Derive the default name, total duration, and setup requirements."""
        # Fall back to "<category>_tests" when no explicit name was given.
        self.name = self.name or f"{self.category.value}_tests"

        self.total_estimated_duration = sum(
            t.estimated_duration for t in self.tests
        )

        # Map each resource flag on TestInfo to its requirement label and
        # collect the labels of all flags that are set.
        flag_labels = (
            ("requires_database", "database"),
            ("requires_network", "network"),
            ("requires_auth", "auth"),
        )
        for t in self.tests:
            for attr, label in flag_labels:
                if getattr(t, attr):
                    self.setup_requirements.add(label)

    @property
    def test_count(self) -> int:
        """Number of tests contained in this suite."""
        return len(self.tests)

    @property
    def estimated_duration(self) -> float:
        """Estimated total duration; falsy per-test estimates count as 1.0."""
        return sum(
            t.estimated_duration if t.estimated_duration else 1.0
            for t in self.tests
        )
|
|
|
|
|
|
@dataclass
class ExecutionResult:
    """Result of executing one test suite.

    Holds the pass/fail counters, raw output, and optional coverage and
    timing details for a single suite run.
    """

    success: bool
    total_tests: int
    passed: int
    failed: int
    skipped: int
    errors: int
    execution_time: float
    output: str = ""
    error_details: List[Dict[str, Any]] = field(default_factory=list)
    coverage_data: Optional[Dict[str, Any]] = None
    suite: Optional[TestSuite] = None
    status: Optional[TestStatus] = None
    start_time: Optional[datetime.datetime] = None
    end_time: Optional[datetime.datetime] = None
    failures: List[Dict[str, Any]] = field(default_factory=list)

    @property
    def success_rate(self) -> float:
        """Percentage of passed tests; 0.0 when nothing ran."""
        if self.total_tests <= 0:
            return 0.0
        return self.passed / self.total_tests * 100

    @property
    def is_success(self) -> bool:
        """Alias for the stored ``success`` flag."""
        return self.success
|
|
|
|
|
|
@dataclass
class TestRunResult:
    """Aggregate results for a complete test run.

    Combines overall counters with per-category ``ExecutionResult`` entries
    and optional coverage/performance metadata.
    """

    success: bool
    total_tests: int
    passed_tests: int
    failed_tests: int
    skipped_tests: int
    error_tests: int
    execution_time: float
    coverage_percentage: Optional[float] = None
    results_by_category: Dict[TestCategory, ExecutionResult] = field(default_factory=dict)
    failure_details: List[Dict] = field(default_factory=list)
    performance_metrics: Dict[str, float] = field(default_factory=dict)
    mode: Optional[TestRunnerMode] = None
    start_time: Optional[datetime.datetime] = None
    end_time: Optional[datetime.datetime] = None

    @property
    def success_rate(self) -> float:
        """Overall pass percentage; 0.0 for an empty run."""
        return (
            self.passed_tests / self.total_tests * 100
            if self.total_tests
            else 0.0
        )

    @property
    def all_failures(self) -> List[Dict[str, Any]]:
        """Flatten every category's ``failures`` list into one list."""
        return [
            failure
            for result in self.results_by_category.values()
            for failure in result.failures
        ]
|
|
|
|
|
|
@dataclass
class CoverageInfo:
    """Coverage information for a single file or module."""

    file_path: str                     # path of the measured file (string, not Path)
    lines_covered: int                 # count of executed lines
    lines_total: int                   # count of measurable lines
    coverage_percentage: float         # stored, not derived from the counts here
    missing_lines: List[int] = field(default_factory=list)    # line numbers not executed
    excluded_lines: List[int] = field(default_factory=list)   # line numbers excluded from measurement
|
|
|
|
|
|
@dataclass
class CoverageReport:
    """Complete coverage report across all measured files."""

    overall_percentage: float          # stored, not derived from the per-file entries here
    files: List[CoverageInfo] = field(default_factory=list)   # per-file breakdown
    total_lines: int = 0
    covered_lines: int = 0
    missing_lines: int = 0             # aggregate count (int), unlike CoverageInfo.missing_lines (list)
    # NOTE(review): naive local-time timestamp; consider a timezone-aware
    # default (datetime.now(tz=...)) if reports are compared across hosts.
    timestamp: datetime.datetime = field(default_factory=datetime.datetime.now)
|
|
|
|
|
|
@dataclass
class ReportMetadata:
    """Metadata describing one generated report artifact."""

    format: ReportFormat               # output format of the report
    file_path: Path                    # where the report was written
    generated_at: datetime.datetime    # generation time (no default — caller supplies it)
    test_run_id: str                   # identifier linking the report to a test run
    summary: Dict[str, Any] = field(default_factory=dict)   # free-form summary payload