# Source: youtube-summarizer/test_enhanced_export_structure.py (375 lines, 14 KiB, Python)
#!/usr/bin/env python3
"""Simplified test script for Story 4.4 Enhanced Export system structure."""
import asyncio
import os
import sys
import logging
from datetime import datetime
# Add the parent directory to Python path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from backend.services.executive_summary_generator import (
ExecutiveSummaryGenerator,
ExecutiveSummary,
ExecutiveMetrics
)
from backend.services.timestamp_processor import (
TimestampProcessor,
TimestampedSection,
SectionDetectionResult
)
from backend.services.enhanced_markdown_formatter import (
EnhancedMarkdownFormatter,
MarkdownExportConfig,
EnhancedMarkdownExport
)
from backend.services.enhanced_template_manager import (
EnhancedTemplateManager,
DomainCategory,
PromptTemplate,
TemplateStatus,
ModelConfig
)
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def test_data_structures():
    """Instantiate each Story 4.4 data structure with sample values.

    Purely constructive smoke test: no API calls, no I/O beyond logging.
    Returns True when every constructor succeeds, False on any exception.
    """
    logger.info("Testing data structure definitions...")
    try:
        # Quantitative summary metrics for a video.
        metrics = ExecutiveMetrics(
            duration_minutes=10,
            word_count=1500,
            main_topics=["AI", "Strategy", "Implementation"],
            sentiment_score=0.7,
            complexity_level="intermediate",
            confidence_score=0.85,
        )
        logger.info(f"✅ ExecutiveMetrics: {len(metrics.main_topics)} topics")

        # Full executive summary wrapping the metrics above.
        summary = ExecutiveSummary(
            overview="This is a test overview.",
            key_metrics=metrics,
            business_value="Significant ROI potential",
            action_items=["Action 1", "Action 2"],
            strategic_implications=["Implication 1", "Implication 2"],
            sentiment_analysis={"positive": 0.7, "neutral": 0.2, "negative": 0.1},
            processing_time_seconds=1.5,
            created_at=datetime.now(),
        )
        logger.info(f"✅ ExecutiveSummary: {len(summary.action_items)} action items")

        # A single timestamped section with a deep link into the video.
        section = TimestampedSection(
            index=1,
            title="Introduction",
            start_timestamp=0,
            end_timestamp=60,
            youtube_link="https://youtube.com/watch?v=test#t=0",
            content="Introduction content",
            summary="Brief summary",
            key_points=["Point 1", "Point 2"],
            confidence_score=0.9,
        )
        logger.info(f"✅ TimestampedSection: {section.title} ({section.end_timestamp - section.start_timestamp}s)")

        # Aggregate result produced by the section-detection pass.
        detection_result = SectionDetectionResult(
            sections=[section],
            total_sections=1,
            processing_time_seconds=2.1,
            quality_score=0.85,
            created_at=datetime.now(),
        )
        logger.info(f"✅ SectionDetectionResult: {detection_result.total_sections} sections")

        # Export configuration toggles.
        config = MarkdownExportConfig(
            include_executive_summary=True,
            include_timestamps=True,
            include_toc=True,
            section_detail_level="standard",
            custom_template_id=None,
        )
        logger.info(f"✅ MarkdownExportConfig: detail level = {config.section_detail_level}")

        # Finished export payload plus metadata.
        export_result = EnhancedMarkdownExport(
            markdown_content="# Test Export\n\nContent here...",
            metadata={"title": "Test", "sections": 2},
            quality_score=0.8,
            processing_time_seconds=1.2,
            created_at=datetime.now(),
        )
        logger.info(f"✅ EnhancedMarkdownExport: {len(export_result.markdown_content)} characters")

        # Prompt template with a nested per-model generation config.
        model_config = ModelConfig(
            temperature=0.7,
            max_tokens=1500,
            top_p=1.0,
            frequency_penalty=0.0,
            presence_penalty=0.0,
            model_name="deepseek-chat",
        )
        template = PromptTemplate(
            id="test-template-123",
            name="Test Template",
            description="A test template for validation",
            prompt_text="Analyze this content: {content}",
            domain_category=DomainCategory.GENERAL,
            model_config=model_config,
            is_public=True,
            status=TemplateStatus.ACTIVE,
            usage_count=5,
            rating=4.2,
            version="1.0.0",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            created_by="system",
            tags=["test", "general"],
            variables={"content": "Variable for content"},
            performance_metrics={"avg_time": 1.5, "success_rate": 0.95},
        )
        logger.info(f"✅ PromptTemplate: {template.name} ({template.domain_category.value})")
        return True
    except Exception as e:
        logger.error(f"❌ Data structure test failed: {e}")
        return False
def test_service_initialization():
    """Construct each Story 4.4 service object without touching any API.

    Returns True when every constructor succeeds, False on any exception.
    """
    logger.info("Testing service initialization...")
    try:
        # Executive-summary generator (logged attribute confirms config loaded).
        gen = ExecutiveSummaryGenerator()
        logger.info(f"✅ ExecutiveSummaryGenerator initialized with {gen.max_overview_paragraphs} paragraph limit")

        # Timestamp/section processor.
        ts_proc = TimestampProcessor()
        logger.info(f"✅ TimestampProcessor initialized with {ts_proc.min_section_duration}s min section")

        # Markdown formatter wires the two services above together.
        EnhancedMarkdownFormatter(gen, ts_proc)
        logger.info("✅ EnhancedMarkdownFormatter initialized with dependencies")

        # Template manager starts with its built-in template set.
        mgr = EnhancedTemplateManager()
        logger.info(f"✅ EnhancedTemplateManager initialized with {len(mgr.templates)} initial templates")
        return True
    except Exception as e:
        logger.error(f"❌ Service initialization test failed: {e}")
        return False
async def test_template_manager_presets():
    """Exercise EnhancedTemplateManager preset/recommendation paths (no API calls).

    Returns True when preset init, recommendations, and stats all succeed.
    """
    logger.info("Testing template manager presets...")
    try:
        manager = EnhancedTemplateManager()

        # Seed the per-domain preset templates.
        await manager.initialize_domain_presets()
        logger.info(f"✅ Domain presets initialized: {manager.domain_presets_initialized}")

        # Every DomainCategory should now have a "preset_<value>" template.
        for domain in DomainCategory:
            template = manager.templates.get(f"preset_{domain.value}")
            if template is None:
                logger.warning(f"⚠️ Missing preset for {domain.value}")
            else:
                logger.info(f"{domain.value}: {template.name}")

        # Content-based domain recommendations (local heuristic, no network).
        sample_content = "This tutorial covers Python programming basics and data structures"
        recommendations = await manager.get_domain_recommendations(sample_content, max_recommendations=3)
        logger.info(f"✅ Domain recommendations: {len(recommendations)} found")
        for rec in recommendations:
            logger.info(f" - {rec['domain_category']}: {rec['confidence_score']:.2f} confidence")

        # Aggregate stats over the template store.
        stats = await manager.get_system_stats()
        logger.info(f"✅ System stats: {stats['total_templates']} templates")
        logger.info(f" - Templates by domain: {stats['templates_by_domain']}")
        return True
    except Exception as e:
        logger.error(f"❌ Template manager presets test failed: {e}")
        return False
def test_api_imports():
    """Import the enhanced-export FastAPI router and sanity-check its routes.

    Returns True when the router imports cleanly; missing route patterns are
    only warned about, not treated as failures.
    """
    logger.info("Testing API endpoint imports...")
    try:
        from backend.api.enhanced_export import router
        logger.info("✅ Enhanced export API router imported successfully")

        # Substring-match each expected path fragment against registered routes.
        routes = [route.path for route in router.routes]
        expected_routes = [
            "/enhanced",
            "/config",
            "/templates",
            "/recommendations",
            "/health",
        ]
        found_routes = []
        for pattern in expected_routes:
            if any(pattern in route for route in routes):
                found_routes.append(pattern)
                logger.info(f"✅ Found route pattern: {pattern}")
            else:
                logger.warning(f"⚠️ Missing route pattern: {pattern}")
        logger.info(f"✅ API routes: {len(found_routes)}/{len(expected_routes)} patterns found")
        return True
    except Exception as e:
        logger.error(f"❌ API import test failed: {e}")
        return False
def test_database_models():
    """Import the enhanced-export ORM models and verify expected attributes.

    Missing attributes are warned about but do not fail the test; only an
    import/attribute-access exception returns False.
    """
    logger.info("Testing database model imports...")
    try:
        from backend.models.enhanced_export import (
            PromptTemplate as DBPromptTemplate,
            ExportMetadata,
            SummarySection,
            PromptExperiment,
        )
        logger.info("✅ Enhanced export database models imported successfully")

        # Minimal attribute contract each model class must expose.
        required_fields = {
            'PromptTemplate': ['id', 'name', 'prompt_text', 'domain_category'],
            'ExportMetadata': ['id', 'summary_id', 'export_type', 'quality_score'],
            'SummarySection': ['id', 'summary_id', 'title', 'start_timestamp'],
            'PromptExperiment': ['id', 'name', 'baseline_template_id', 'variant_template_id'],
        }
        models = {
            'PromptTemplate': DBPromptTemplate,
            'ExportMetadata': ExportMetadata,
            'SummarySection': SummarySection,
            'PromptExperiment': PromptExperiment,
        }
        for model_name, model_class in models.items():
            for field in required_fields[model_name]:
                if hasattr(model_class, field):
                    logger.info(f"{model_name}.{field} exists")
                else:
                    logger.warning(f"⚠️ {model_name}.{field} missing")
        return True
    except Exception as e:
        logger.error(f"❌ Database model test failed: {e}")
        return False
async def test_integrated_functionality():
    """Run every structural test suite and report an aggregate pass/fail.

    Returns:
        True only when all sub-tests pass, False otherwise.
    """
    logger.info("Testing integrated functionality...")
    try:
        # Store plain callables (sync or async) uniformly. Previously the
        # async test's coroutine was created eagerly at list-build time
        # (and sync tests were wrapped in redundant lambdas); if an earlier
        # test raised, that coroutine was never awaited, leaking a
        # "coroutine was never awaited" RuntimeWarning.
        tests = [
            ("Data Structures", test_data_structures),
            ("Service Initialization", test_service_initialization),
            ("Template Manager Presets", test_template_manager_presets),
            ("API Endpoint Imports", test_api_imports),
            ("Database Model Imports", test_database_models),
        ]
        results = []
        for test_name, test_func in tests:
            logger.info(f"\n{'='*50}")
            logger.info(f"Running: {test_name}")
            logger.info(f"{'='*50}")
            try:
                # Await async test functions; call sync ones directly.
                if asyncio.iscoroutinefunction(test_func):
                    result = await test_func()
                else:
                    result = test_func()
                results.append((test_name, result))
            except Exception as e:
                logger.error(f"Test {test_name} failed with exception: {e}")
                results.append((test_name, False))
        # Summary banner with per-test status and overall percentage.
        logger.info(f"\n{'='*50}")
        logger.info("FINAL STRUCTURE TEST RESULTS")
        logger.info(f"{'='*50}")
        passed = 0
        total = len(results)
        for test_name, result in results:
            status = "✅ PASSED" if result else "❌ FAILED"
            logger.info(f"{test_name}: {status}")
            if result:
                passed += 1
        logger.info(f"\nOverall: {passed}/{total} tests passed ({passed/total*100:.1f}%)")
        if passed == total:
            logger.info("🎉 Story 4.4 Enhanced Export System Structure: ALL TESTS PASSED!")
            return True
        else:
            logger.warning(f"⚠️ Story 4.4 Enhanced Export System: {total-passed} structural tests failed")
            return False
    except Exception as e:
        logger.error(f"❌ Integrated functionality test failed: {e}")
        return False
async def main():
    """Entry point: run the structure suite and exit with a process status code."""
    logger.info("Starting Story 4.4 Enhanced Export System Structure Tests")
    logger.info(f"Test started at: {datetime.now().isoformat()}")
    success = await test_integrated_functionality()
    logger.info(f"Test completed at: {datetime.now().isoformat()}")
    # Guard clause: bail out with a non-zero status on failure.
    if not success:
        logger.error("❌ Story 4.4 structure tests failed")
        sys.exit(1)
    logger.info("🎉 Story 4.4: Custom AI Models & Enhanced Markdown Export - STRUCTURE COMPLETE!")
    logger.info("📝 Note: Full end-to-end testing requires valid API keys for DeepSeek service")
    sys.exit(0)


if __name__ == "__main__":
    asyncio.run(main())