""" Test Reporting System Comprehensive test result reporting with support for multiple output formats, detailed analysis, trend tracking, and CI/CD integration. """ import json import xml.etree.ElementTree as ET from datetime import datetime, timedelta from pathlib import Path from typing import Dict, List, Optional, Any import logging from dataclasses import asdict from enum import Enum from .models import TestRunResult, ExecutionResult, TestCategory, ReportFormat # ReportFormat is now imported from models module class TestReporter: """ Comprehensive test result reporter. Generates detailed reports in multiple formats with historical tracking, performance analysis, and CI/CD integration support. """ def __init__(self, config): """ Initialize test reporter. Args: config: Test configuration object """ self.config = config self.logger = logging.getLogger("TestReporter") # Report output directories self.reports_dir = Path("test_reports") self.reports_dir.mkdir(exist_ok=True) # Historical data self.history_file = self.reports_dir / "test_history.json" async def generate_report( self, result: TestRunResult, format_type: ReportFormat, output_path: Optional[Path] = None ) -> Path: """ Generate a test report in the specified format. Args: result: Test run results to report format_type: Format for the report output_path: Optional custom output path Returns: Path to the generated report file """ if format_type == ReportFormat.CONSOLE: return await self._generate_console_report(result) elif format_type == ReportFormat.HTML: return await self._generate_html_report(result, output_path) elif format_type == ReportFormat.JSON: return await self._generate_json_report(result, output_path) elif format_type == ReportFormat.JUNIT: return await self._generate_junit_report(result, output_path) elif format_type == ReportFormat.MARKDOWN: return await self._generate_markdown_report(result, output_path) elif format_type == ReportFormat.CSV: return await self._generate_csv_report(result, output_path) else: raise ValueError(f"Unsupported report format: {format_type}") async def _generate_console_report(self, result: TestRunResult) -> Path: """Generate console output report.""" # This is handled directly by the TestRunner's _log_summary method # Return a placeholder path return Path("/dev/stdout") async def _generate_html_report( self, result: TestRunResult, output_path: Optional[Path] = None ) -> Path: """Generate comprehensive HTML report.""" output_path = output_path or (self.reports_dir / f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.html") # Generate HTML content html_content = self._create_html_report(result) # Write to file with open(output_path, 'w', encoding='utf-8') as f: f.write(html_content) self.logger.info(f"HTML report generated: {output_path}") return output_path async def _generate_json_report( self, result: TestRunResult, output_path: Optional[Path] = None ) -> Path: """Generate JSON report.""" output_path = output_path or (self.reports_dir / f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json") # Create comprehensive JSON report report_data = { "timestamp": datetime.now().isoformat(), "summary": { "success": result.success, "total_tests": result.total_tests, "passed_tests": result.passed_tests, "failed_tests": result.failed_tests, "skipped_tests": result.skipped_tests, "error_tests": result.error_tests, "execution_time": result.execution_time, "coverage_percentage": result.coverage_percentage, "pass_rate": (result.passed_tests / max(result.total_tests, 1)) * 100 }, "results_by_category": 
            {
                category.value: {
                    "total_tests": exec_result.total_tests,
                    "passed": exec_result.passed,
                    "failed": exec_result.failed,
                    "skipped": exec_result.skipped,
                    "errors": exec_result.errors,
                    "execution_time": exec_result.execution_time,
                    "pass_rate": exec_result.pass_rate
                }
                for category, exec_result in result.results_by_category.items()
            },
            "failure_details": result.failure_details,
            "performance_metrics": result.performance_metrics,
            "environment": {
                "python_version": self._get_python_version(),
                "platform": self._get_platform_info()
            }
        }

        # Write JSON report
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(report_data, f, indent=2, default=str)

        # Update historical data
        await self._update_history(report_data)

        self.logger.info(f"JSON report generated: {output_path}")
        return output_path

    async def _generate_junit_report(
        self,
        result: TestRunResult,
        output_path: Optional[Path] = None
    ) -> Path:
        """Generate JUnit XML report for CI/CD integration."""
        output_path = output_path or (
            self.reports_dir / f"junit_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xml"
        )

        # Create JUnit XML structure
        testsuites = ET.Element("testsuites")
        testsuites.set("name", "YouTube Summarizer Tests")
        testsuites.set("tests", str(result.total_tests))
        testsuites.set("failures", str(result.failed_tests))
        testsuites.set("errors", str(result.error_tests))
        testsuites.set("skipped", str(result.skipped_tests))
        testsuites.set("time", f"{result.execution_time:.3f}")

        # Create a test suite for each category
        for category, exec_result in result.results_by_category.items():
            testsuite = ET.SubElement(testsuites, "testsuite")
            testsuite.set("name", f"{category.value}_tests")
            testsuite.set("tests", str(exec_result.total_tests))
            testsuite.set("failures", str(exec_result.failed))
            testsuite.set("errors", str(exec_result.errors))
            testsuite.set("skipped", str(exec_result.skipped))
            testsuite.set("time", f"{exec_result.execution_time:.3f}")

            # Add individual test cases (simplified)
            for i in range(exec_result.total_tests):
                testcase = ET.SubElement(testsuite, "testcase")
                testcase.set("name", f"test_{i + 1}")
                testcase.set("classname", f"{category.value}.TestClass")
                testcase.set("time", "1.0")

                # Add failures if any (simplified)
                if i < exec_result.failed and exec_result.error_details:
                    failure = ET.SubElement(testcase, "failure")
                    failure.set("type", "TestFailure")
                    if i < len(exec_result.error_details):
                        failure.text = exec_result.error_details[i].get("error", "Test failed")

        # Write XML
        tree = ET.ElementTree(testsuites)
        tree.write(output_path, encoding='utf-8', xml_declaration=True)

        self.logger.info(f"JUnit report generated: {output_path}")
        return output_path

    async def _generate_markdown_report(
        self,
        result: TestRunResult,
        output_path: Optional[Path] = None
    ) -> Path:
        """Generate Markdown report."""
        output_path = output_path or (
            self.reports_dir / f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md"
        )

        # Generate markdown content
        md_content = self._create_markdown_report(result)

        # Write to file
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(md_content)

        self.logger.info(f"Markdown report generated: {output_path}")
        return output_path

    async def _generate_csv_report(
        self,
        result: TestRunResult,
        output_path: Optional[Path] = None
    ) -> Path:
        """Generate CSV report for data analysis."""
        output_path = output_path or (
            self.reports_dir / f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
        )

        # Create CSV content
        import csv

        with open(output_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)

            # Header
            writer.writerow([
                "Category",
                "Total",
"Passed", "Failed", "Skipped", "Errors", "Execution Time", "Pass Rate", "Timestamp" ]) # Data rows timestamp = datetime.now().isoformat() # Overall summary writer.writerow([ "OVERALL", result.total_tests, result.passed_tests, result.failed_tests, result.skipped_tests, result.error_tests, f"{result.execution_time:.2f}", f"{(result.passed_tests / max(result.total_tests, 1)) * 100:.1f}%", timestamp ]) # Category details for category, exec_result in result.results_by_category.items(): writer.writerow([ category.value, exec_result.total_tests, exec_result.passed, exec_result.failed, exec_result.skipped, exec_result.errors, f"{exec_result.execution_time:.2f}", f"{exec_result.pass_rate:.1f}%", timestamp ]) self.logger.info(f"CSV report generated: {output_path}") return output_path def _create_html_report(self, result: TestRunResult) -> str: """Create comprehensive HTML report.""" status_class = "success" if result.success else "failure" status_text = "PASSED" if result.success else "FAILED" # Generate category breakdown category_rows = "" for category, exec_result in result.results_by_category.items(): row_class = "success" if exec_result.success else "failure" category_rows += f"""
            <tr class="{row_class}">
                <td>{category.value}</td>
                <td>{exec_result.total_tests}</td>
                <td>{exec_result.passed}</td>
                <td>{exec_result.failed}</td>
                <td>{exec_result.skipped}</td>
                <td>{exec_result.errors}</td>
                <td>{exec_result.execution_time:.2f}</td>
                <td>{exec_result.pass_rate:.1f}%</td>
            </tr>"""

        return f"""<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>Test Report</title>
    <style>
        body {{ font-family: sans-serif; margin: 2em; }}
        .success {{ color: #2e7d32; }}
        .failure {{ color: #c62828; }}
        table {{ border-collapse: collapse; margin-bottom: 1.5em; }}
        th, td {{ border: 1px solid #ccc; padding: 0.4em 0.8em; text-align: left; }}
    </style>
</head>
<body>
    <h1 class="{status_class}">Status: {status_text}</h1>
    <p>Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
    <p>Total Execution Time: {result.execution_time:.2f} seconds</p>
    {f'<p>Coverage: {result.coverage_percentage:.1f}%</p>' if result.coverage_percentage else ''}
    <table>
        <tr><th>Total Tests</th><th>Passed</th><th>Failed</th><th>Skipped</th><th>Errors</th></tr>
        <tr>
            <td>{result.total_tests}</td>
            <td>{result.passed_tests}</td>
            <td>{result.failed_tests}</td>
            <td>{result.skipped_tests}</td>
            <td>{result.error_tests}</td>
        </tr>
    </table>
    <h2>Results by Category</h2>
    <table>
        <tr>
            <th>Category</th><th>Total</th><th>Passed</th><th>Failed</th>
            <th>Skipped</th><th>Errors</th><th>Time (s)</th><th>Pass Rate</th>
        </tr>{category_rows}
    </table>
</body>
</html>"""
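    # _generate_json_report records interpreter and OS details through these
    # two helpers, which do not appear in this section. Minimal sketches using
    # the standard library's `platform` module; the project's real
    # implementations may differ.
    def _get_python_version(self) -> str:
        """Return the running interpreter version, e.g. '3.11.4'."""
        import platform
        return platform.python_version()

    def _get_platform_info(self) -> str:
        """Return a short OS/machine description, e.g. 'Linux-6.5.0-x86_64'."""
        import platform
        return platform.platform()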
    def _create_markdown_report(self, result: TestRunResult) -> str:
        """Create Markdown report content."""
        status_text = "PASSED" if result.success else "FAILED"

        lines = [
            "# Test Report",
            "",
            f"**Status:** {status_text}",
            f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"**Total Execution Time:** {result.execution_time:.2f} seconds",
            "",
            "| Category | Total | Passed | Failed | Skipped | Errors | Time (s) | Pass Rate |",
            "|----------|-------|--------|--------|---------|--------|----------|-----------|",
        ]

        # One row per category, mirroring the CSV report
        for category, exec_result in result.results_by_category.items():
            lines.append(
                f"| {category.value} | {exec_result.total_tests} | {exec_result.passed} "
                f"| {exec_result.failed} | {exec_result.skipped} | {exec_result.errors} "
                f"| {exec_result.execution_time:.2f} | {exec_result.pass_rate:.1f}% |"
            )

        return "\n".join(lines)
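    # _generate_json_report awaits self._update_history(report_data), which is
    # also not shown in this section. A minimal sketch, assuming the history
    # file holds a JSON list of run summaries capped at the 100 most recent:
    async def _update_history(self, report_data: dict) -> None:
        """Append this run's summary to the JSON history file."""
        history = []
        if self.history_file.exists():
            try:
                history = json.loads(self.history_file.read_text(encoding='utf-8'))
            except (json.JSONDecodeError, OSError):
                self.logger.warning("Could not read test history; starting fresh")

        history.append({
            "timestamp": report_data["timestamp"],
            "summary": report_data["summary"],
        })

        # Cap the history so the file does not grow without bound
        history = history[-100:]
        self.history_file.write_text(
            json.dumps(history, indent=2, default=str), encoding='utf-8'
        )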