"""
|
|
Comprehensive Testing Suite for Trax Application
|
|
Tests the TestSuiteRunner and related infrastructure
|
|
"""
|
|
import asyncio
|
|
import tempfile
|
|
from pathlib import Path
|
|
from typing import Dict, List, Any
|
|
import pytest
|
|
import time
|
|
|
|
from src.services.protocols import (
|
|
TranscriptionServiceProtocol,
|
|
MediaServiceProtocol,
|
|
YouTubeServiceProtocol,
|
|
BatchProcessorProtocol
|
|
)
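
# --- Illustrative sketch (assumption, not the real tests.testing_suite API) ---
# The tests below only assert that the suite runner exposes the async entry points
# checked in test_suite_runner_initialization. The structural type here captures that
# assumed surface; the Dict[str, Any] return type is a guess, not confirmed by the suite.
from typing import Protocol


class _AssumedSuiteRunner(Protocol):
    async def run_all_tests(self) -> Dict[str, Any]: ...
    async def run_unit_tests(self) -> Dict[str, Any]: ...
    async def run_integration_tests(self) -> Dict[str, Any]: ...
    async def run_performance_tests(self) -> Dict[str, Any]: ...
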
|
|
|
|
|
|
class TestComprehensiveTestingSuite:
    """Test the comprehensive testing suite infrastructure"""

    @pytest.mark.asyncio
    async def test_suite_runner_initialization(self):
        """Test that the test suite runner initializes correctly"""
        from tests.testing_suite import TestSuiteRunner

        runner = TestSuiteRunner()
        assert runner is not None
        assert hasattr(runner, 'run_all_tests')
        assert hasattr(runner, 'run_unit_tests')
        assert hasattr(runner, 'run_integration_tests')
        assert hasattr(runner, 'run_performance_tests')

    @pytest.mark.asyncio
    async def test_fixture_manager_initialization(self):
        """Test that the fixture manager creates required test data"""
        from tests.testing_suite import FixtureManager

        manager = FixtureManager()
        fixtures = await manager.create_test_fixtures()

        assert 'audio_files' in fixtures
        assert 'database' in fixtures
        assert 'mock_services' in fixtures
        assert len(fixtures['audio_files']) > 0

    @pytest.mark.asyncio
    async def test_mock_service_factory(self):
        """Test that mock services are created correctly"""
        from tests.testing_suite import MockServiceFactory

        factory = MockServiceFactory()
        youtube_service = factory.create_youtube_service()
        transcription_service = factory.create_transcription_service()

        # Test YouTube service mock
        metadata = await youtube_service.extract_metadata("https://youtube.com/watch?v=test")
        assert metadata['youtube_id'] == 'test'
        assert 'title' in metadata
        assert 'duration_seconds' in metadata

        # Test transcription service mock
        result = await transcription_service.transcribe(Path("test.wav"), 1)
        assert 'raw_content' in result
        assert 'text_content' in result
        assert result['pipeline_version'] == 'v1'

    @pytest.mark.asyncio
    async def test_performance_benchmark_runner(self):
        """Test that performance benchmarks execute correctly"""
        from tests.testing_suite import PerformanceBenchmarkRunner

        runner = PerformanceBenchmarkRunner()
        results = await runner.run_transcription_benchmark()

        assert 'duration_seconds' in results
        assert 'real_time_factor' in results
        assert 'memory_usage_mb' in results
        assert results['real_time_factor'] < 1.0  # Should be faster than real-time

    @pytest.mark.asyncio
    async def test_integration_test_runner(self):
        """Test that integration tests execute the full pipeline"""
        from tests.testing_suite import IntegrationTestRunner

        runner = IntegrationTestRunner()
        result = await runner.test_v1_pipeline()

        assert result['success'] is True
        assert 'transcript' in result
        assert 'processing_time' in result
        assert result['transcript']['pipeline_version'] == 'v1'

    @pytest.mark.asyncio
    async def test_cli_command_testing(self):
        """Test that CLI commands can be tested programmatically"""
        from tests.testing_suite import CLITestRunner

        runner = CLITestRunner()

        # Test transcribe command
        result = await runner.test_transcribe_command("test_audio.wav")
        assert result['exit_code'] == 0
        assert 'output' in result

        # Test batch command
        result = await runner.test_batch_command(["test1.wav", "test2.wav"])
        assert result['exit_code'] == 0
        assert 'processed_files' in result

    @pytest.mark.asyncio
    async def test_database_migration_testing(self):
        """Test that database migrations can be tested"""
        from tests.testing_suite import DatabaseMigrationTester

        tester = DatabaseMigrationTester()

        # Test migration up
        result = await tester.test_migration_up()
        assert result['success'] is True
        assert 'applied_migrations' in result

        # Test migration down
        result = await tester.test_migration_down()
        assert result['success'] is True
        assert 'reverted_migrations' in result

    @pytest.mark.asyncio
    async def test_coverage_reporting(self):
        """Test that test coverage is tracked and reported"""
        from tests.testing_suite import CoverageReporter

        reporter = CoverageReporter()
        report = await reporter.generate_coverage_report()

        assert 'total_coverage' in report
        assert 'module_coverage' in report
        assert report['total_coverage'] >= 80.0  # Minimum 80% coverage

        # Check specific modules have good coverage
        critical_modules = ['services', 'repositories', 'database']
        for module in critical_modules:
            if module in report['module_coverage']:
                assert report['module_coverage'][module] >= 80.0

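
# --- Illustrative sketch (assumption): one way MockServiceFactory could build its
# mocks. The real factory lives in tests/testing_suite.py; this stand-alone helper
# only shows that plain unittest.mock.AsyncMock objects returning dicts shaped like
# the assertions in test_mock_service_factory above would satisfy those tests.
def _make_example_youtube_mock():
    from unittest.mock import AsyncMock

    service = AsyncMock()
    # Awaiting service.extract_metadata(...) yields this canned payload.
    service.extract_metadata.return_value = {
        'youtube_id': 'test',
        'title': 'Example video',     # placeholder value, not taken from the suite
        'duration_seconds': 300,      # placeholder value, not taken from the suite
    }
    return service
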

class TestErrorHandlingAndEdgeCases:
    """Test error handling and edge cases in the testing suite"""

    @pytest.mark.asyncio
    async def test_missing_audio_file_handling(self):
        """Test how the suite handles missing audio files"""
        from tests.testing_suite import TestSuiteRunner

        runner = TestSuiteRunner()

        with pytest.raises(FileNotFoundError):
            await runner.test_with_missing_file("nonexistent.wav")

    @pytest.mark.asyncio
    async def test_corrupted_audio_file_handling(self):
        """Test how the suite handles corrupted audio files"""
        from tests.testing_suite import TestSuiteRunner

        runner = TestSuiteRunner()

        # Create a fake corrupted file
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
            f.write(b'corrupted data')
            corrupted_file = Path(f.name)

        try:
            result = await runner.test_with_corrupted_file(corrupted_file)
            assert result['success'] is False
            assert 'error' in result
        finally:
            corrupted_file.unlink()

    @pytest.mark.asyncio
    async def test_database_connection_failure(self):
        """Test how the suite handles database connection failures"""
        from tests.testing_suite import DatabaseMigrationTester

        tester = DatabaseMigrationTester()

        # Test with invalid database URL
        result = await tester.test_with_invalid_db("invalid://connection")
        assert result['success'] is False
        assert 'connection_error' in result

    @pytest.mark.asyncio
    async def test_memory_limit_handling(self):
        """Test how the suite handles memory limit scenarios"""
        from tests.testing_suite import PerformanceBenchmarkRunner

        runner = PerformanceBenchmarkRunner()

        # Test with simulated memory pressure
        result = await runner.test_memory_limits()
        assert 'memory_usage_mb' in result
        assert result['memory_usage_mb'] < 2000  # Should stay under 2GB

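
# --- Illustrative sketch (assumption): how a "memory_usage_mb" figure might be
# sampled. The real measurement lives inside PerformanceBenchmarkRunner; this helper
# just shows one conventional approach, assuming psutil is available as a dependency.
def _example_rss_mb() -> float:
    import psutil  # third-party; an assumed dependency, not confirmed by this suite

    # Resident set size of the current process, converted from bytes to MiB.
    return psutil.Process().memory_info().rss / (1024 * 1024)
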

class TestPerformanceMetrics:
    """Test performance metrics and benchmarking"""

    @pytest.mark.asyncio
    async def test_transcription_speed_benchmark(self):
        """Test transcription speed meets requirements"""
        from tests.testing_suite import PerformanceBenchmarkRunner

        runner = PerformanceBenchmarkRunner()

        # Test v1 requirements: 5-minute audio in <30 seconds
        result = await runner.benchmark_v1_transcription()
        assert result['audio_duration_seconds'] == 300  # 5 minutes
        assert result['processing_time_seconds'] < 30
        assert result['real_time_factor'] < 0.1  # Much faster than real-time

    @pytest.mark.asyncio
    async def test_memory_usage_benchmark(self):
        """Test memory usage stays within limits"""
        from tests.testing_suite import PerformanceBenchmarkRunner

        runner = PerformanceBenchmarkRunner()

        result = await runner.benchmark_memory_usage()
        assert result['peak_memory_mb'] < 2000  # v1 requirement: <2GB
        assert result['average_memory_mb'] < 1500

    @pytest.mark.asyncio
    async def test_batch_processing_performance(self):
        """Test batch processing performance"""
        from tests.testing_suite import PerformanceBenchmarkRunner

        runner = PerformanceBenchmarkRunner()

        # Test processing 10 files
        result = await runner.benchmark_batch_processing(10)
        assert result['total_files'] == 10
        assert result['successful_files'] == 10
        assert result['failed_files'] == 0
        assert result['total_time_seconds'] < 300  # Should process 10 files in <5 minutes
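
# --- Illustrative sketch (assumption): the real-time-factor arithmetic behind the
# speed assertions above, taking RTF = processing_time / audio_duration as the
# assumed definition. 300 s of audio processed in under 30 s gives RTF < 0.1,
# matching test_transcription_speed_benchmark.
def _example_real_time_factor(processing_seconds: float, audio_seconds: float) -> float:
    return processing_seconds / audio_seconds

# For example: _example_real_time_factor(29.0, 300.0) ≈ 0.097, which satisfies RTF < 0.1.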