"""
|
|
Unit tests for performance optimization services.
|
|
|
|
Tests core functionality of resource monitoring, M3 optimization,
|
|
and performance benchmarking.
|
|
"""
|
|
|
|
import asyncio
|
|
import pytest
|
|
from unittest.mock import MagicMock, patch
|
|
from typing import Dict, List
|
|
|
|
from src.services.performance import (
|
|
ResourceMonitor,
|
|
SystemResources,
|
|
PerformanceMetrics,
|
|
M3OptimizationConfig
|
|
)
|
|
from src.services.performance_optimizer import PerformanceOptimizer
|
|
from src.services.ffmpeg_optimizer import FFmpegOptimizer
|
|
from src.services.performance_benchmarker import PerformanceBenchmarker
|
|
|
|
|
|
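# The async tests below use the @pytest.mark.asyncio marker, which requires the
# pytest-asyncio plugin to be installed and enabled.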
class TestResourceMonitor:
    """Test resource monitoring functionality."""

    @pytest.fixture
    def resource_monitor(self):
        """Create a resource monitor instance."""
        return ResourceMonitor(
            memory_threshold=80.0,
            cpu_threshold=90.0,
            disk_threshold=85.0
        )

    def test_resource_monitor_initialization(self, resource_monitor):
        """Test resource monitor initialization."""
        assert resource_monitor.memory_threshold == 80.0
        assert resource_monitor.cpu_threshold == 90.0
        assert resource_monitor.disk_threshold == 85.0
        assert not resource_monitor.is_monitoring

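    # Note: stacked @patch decorators inject mock arguments bottom-up, so the
    # decorator closest to the function (psutil.disk_usage) maps to mock_disk.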
    @patch('psutil.virtual_memory')
    @patch('psutil.cpu_percent')
    @patch('psutil.disk_usage')
    def test_get_system_resources(self, mock_disk, mock_cpu, mock_vm, resource_monitor):
        """Test getting current system resource usage."""
        # Mock system resources
        mock_vm.return_value.percent = 75.0
        mock_vm.return_value.available = 1024 * 1024 * 1024  # 1GB
        mock_cpu.return_value = 65.0
        mock_disk.return_value.percent = 60.0

        resources = resource_monitor.get_system_resources()

        assert resources.memory_percent == 75.0
        assert resources.cpu_percent == 65.0
        assert resources.disk_percent == 60.0
        assert resources.memory_available_mb > 0

    def test_resource_threshold_checking(self, resource_monitor):
        """Test resource threshold checking logic."""
        # Test normal usage
        assert not resource_monitor.is_memory_critical(75.0)
        assert not resource_monitor.is_cpu_critical(65.0)
        assert not resource_monitor.is_disk_critical(60.0)

        # Test critical usage
        assert resource_monitor.is_memory_critical(85.0)
        assert resource_monitor.is_cpu_critical(95.0)
        assert resource_monitor.is_disk_critical(90.0)

    @pytest.mark.asyncio
    async def test_start_stop_monitoring(self, resource_monitor):
        """Test starting and stopping resource monitoring."""
        # Start monitoring
        await resource_monitor.start_monitoring()
        assert resource_monitor.is_monitoring
        assert resource_monitor.monitor_task is not None

        # Stop monitoring
        await resource_monitor.stop_monitoring()
        assert not resource_monitor.is_monitoring
        assert resource_monitor.monitor_task is None


class TestPerformanceOptimizer:
    """Test performance optimization functionality."""

    @pytest.fixture
    def optimizer(self):
        """Create a performance optimizer instance."""
        return PerformanceOptimizer(max_workers=8)

    def test_optimizer_initialization(self, optimizer):
        """Test optimizer initialization."""
        assert optimizer.max_workers == 8
        assert optimizer.resource_monitor is not None
        assert optimizer.m3_config is not None

    def test_get_m3_optimization_config(self, optimizer):
        """Test M3 optimization configuration."""
        config = optimizer.get_m3_optimization_config()

        assert config.use_metal_acceleration
        assert config.optimize_memory_layout
        assert config.parallel_processing_enabled
        assert config.chunk_size_mb > 0

    @pytest.mark.asyncio
    async def test_optimize_processing_pipeline(self, optimizer):
        """Test processing pipeline optimization."""
        # Mock a simple processing function
        async def process_item(item):
            await asyncio.sleep(0.01)  # Simulate work
            return f"processed_{item}"

        items = list(range(5))

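        # With batch_size=2 the five items are presumably handled as three batches
        # (2 + 2 + 1); the exact batching strategy is an implementation detail of
        # optimize_processing_pipeline, so only the combined output is asserted here.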
        # Test optimization
        result = await optimizer.optimize_processing_pipeline(
            items, process_item, batch_size=2
        )

        assert len(result) == 5
        assert all(r.startswith("processed_") for r in result)


class TestFFmpegOptimizer:
    """Test FFmpeg optimization functionality."""

    @pytest.fixture
    def ffmpeg_optimizer(self):
        """Create an FFmpeg optimizer instance."""
        return FFmpegOptimizer()

    def test_ffmpeg_optimizer_initialization(self, ffmpeg_optimizer):
        """Test FFmpeg optimizer initialization."""
        assert ffmpeg_optimizer.m3_optimized
        assert ffmpeg_optimizer.hardware_acceleration_enabled
        assert ffmpeg_optimizer.audio_quality_preserved

    def test_get_m3_optimized_params(self, ffmpeg_optimizer):
        """Test M3-optimized FFmpeg parameters."""
        # Test video input
        params = ffmpeg_optimizer.get_optimized_params(
            input_format="mp4",
            output_format="wav",
            target_sample_rate=16000
        )

        assert "-hwaccel" in params
        assert "videotoolbox" in params
        assert "-ar" in params
        assert "16000" in params
        assert "-ac" in params
        assert "1" in params

    def test_get_transcription_optimized_params(self, ffmpeg_optimizer):
        """Test transcription-optimized FFmpeg parameters."""
        params = ffmpeg_optimizer.get_transcription_optimized_params(
            input_format="mp4"
        )

        assert "-ar" in params
        assert "16000" in params
        assert "-ac" in params
        assert "1" in params
        assert "-f" in params
        assert "wav" in params


class TestPerformanceBenchmarker:
    """Test performance benchmarking functionality."""

    @pytest.fixture
    def benchmarker(self):
        """Create a performance benchmarker instance."""
        return PerformanceBenchmarker()

    @pytest.fixture
    def sample_metrics(self):
        """Create sample performance metrics."""
        return PerformanceMetrics(
            operation="transcription",
            duration_seconds=2.5,
            memory_peak_mb=512.0,
            cpu_peak_percent=75.0,
            throughput_items_per_second=4.0,
            error_count=0,
            success_count=10,
            total_count=10
        )

    def test_benchmarker_initialization(self, benchmarker):
        """Test benchmarker initialization."""
        assert benchmarker.benchmarks == {}
        assert benchmarker.report_format == "json"

    def test_record_operation(self, benchmarker, sample_metrics):
        """Test recording operation metrics."""
        benchmarker.record_operation("test_op", sample_metrics)

        assert "test_op" in benchmarker.benchmarks
        assert len(benchmarker.benchmarks["test_op"]) == 1
        assert benchmarker.benchmarks["test_op"][0] == sample_metrics

    def test_calculate_statistics(self, benchmarker):
        """Test calculating performance statistics."""
        # Add multiple metrics
        metrics1 = PerformanceMetrics(
            operation="test",
            duration_seconds=1.0,
            memory_peak_mb=100.0,
            cpu_peak_percent=50.0,
            throughput_items_per_second=10.0,
            error_count=0,
            success_count=5,
            total_count=5
        )

        metrics2 = PerformanceMetrics(
            operation="test",
            duration_seconds=2.0,
            memory_peak_mb=200.0,
            cpu_peak_percent=75.0,
            throughput_items_per_second=5.0,
            error_count=1,
            success_count=4,
            total_count=5
        )

        benchmarker.record_operation("test", metrics1)
        benchmarker.record_operation("test", metrics2)

        stats = benchmarker.calculate_statistics("test")

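        # Expected aggregates over the two recorded runs:
        # durations (1.0, 2.0) -> 1.5; memory (100, 200) -> 150; CPU (50, 75) -> 62.5;
        # throughput (10, 5) -> 7.5; successes 5 + 4 = 9 of 10 items -> 90% success rate.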
        assert stats.avg_duration == 1.5
        assert stats.avg_memory_peak == 150.0
        assert stats.avg_cpu_peak == 62.5
        assert stats.avg_throughput == 7.5
        assert stats.total_operations == 2
        assert stats.success_rate == 90.0

    def test_generate_report(self, benchmarker, sample_metrics):
        """Test report generation."""
        benchmarker.record_operation("test_op", sample_metrics)

        report = benchmarker.generate_report()

        assert "operations" in report
        assert "test_op" in report["operations"]
        assert "statistics" in report["operations"]["test_op"]
        assert "latest_metrics" in report["operations"]["test_op"]


class TestPerformanceIntegration:
    """Integration tests for performance optimization."""

    @pytest.fixture
    def performance_system(self):
        """Create a complete performance optimization system."""
        from src.services.performance import ResourceMonitor
        from src.services.performance_optimizer import PerformanceOptimizer
        from src.services.ffmpeg_optimizer import FFmpegOptimizer
        from src.services.performance_benchmarker import PerformanceBenchmarker

        return {
            "resource_monitor": ResourceMonitor(),
            "optimizer": PerformanceOptimizer(),
            "ffmpeg_optimizer": FFmpegOptimizer(),
            "benchmarker": PerformanceBenchmarker()
        }

    @pytest.mark.asyncio
    async def test_end_to_end_optimization(self, performance_system):
        """Test end-to-end performance optimization workflow."""
        system = performance_system

        # Start resource monitoring
        await system["resource_monitor"].start_monitoring()

        # Simulate processing with optimization
        async def process_item(item):
            await asyncio.sleep(0.01)
            return f"processed_{item}"

        items = list(range(3))

        # Process with optimization
        result = await system["optimizer"].optimize_processing_pipeline(
            items, process_item, batch_size=2
        )

        # Create performance metrics
        metrics = PerformanceMetrics(
            operation="test_optimization",
            duration_seconds=0.1,
            memory_peak_mb=50.0,
            cpu_peak_percent=30.0,
            throughput_items_per_second=30.0,
            error_count=0,
            success_count=len(result),
            total_count=len(items)
        )

        # Record metrics
        system["benchmarker"].record_operation("test_optimization", metrics)

        # Stop monitoring
        await system["resource_monitor"].stop_monitoring()

        # Verify results
        assert len(result) == 3
        assert all(r.startswith("processed_") for r in result)
        assert "test_optimization" in system["benchmarker"].benchmarks

    def test_m3_optimization_configuration(self, performance_system):
        """Test M3 optimization configuration integration."""
        system = performance_system

        # Get M3 config from optimizer
        m3_config = system["optimizer"].get_m3_optimization_config()

        # Get FFmpeg params for M3
        ffmpeg_params = system["ffmpeg_optimizer"].get_optimized_params(
            input_format="mp4",
            output_format="wav",
            target_sample_rate=16000
        )

        # Verify M3 optimizations are enabled
        assert m3_config.use_metal_acceleration
        assert m3_config.parallel_processing_enabled

        # Verify FFmpeg uses M3 optimizations
        assert "-hwaccel" in ffmpeg_params
        assert "videotoolbox" in ffmpeg_params


if __name__ == "__main__":
    pytest.main([__file__, "-v"])