#!/usr/bin/env python3
"""Test Gemini directly using the models endpoint."""

import requests

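# NOTE: This script assumes the backend API is running locally on port 8000 and
# exposes the /health and /api/models/summarize endpoints used below; adjust the
# URLs if your deployment differs.
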
def test_direct_gemini():
    """Test Gemini directly through the models endpoint."""
    print("🎯 Direct Gemini Test")
    print("=" * 40)

    # Simple test transcript
    test_transcript = """
    This is a comprehensive video about artificial intelligence and machine learning.
    The speaker discusses the evolution of neural networks, from simple perceptrons to
    complex deep learning architectures. Key topics covered include convolutional neural
    networks for computer vision, recurrent neural networks for sequence processing,
    and transformer architectures that have revolutionized natural language processing.
    The video also explores practical applications in healthcare, autonomous vehicles,
    and recommendation systems. The presenter emphasizes the importance of ethical AI
    development and responsible deployment of machine learning systems in production
    environments.
    """.strip()

    request_data = {
        "model": "gemini-1.5-pro",  # Explicitly request Gemini
        "transcript": test_transcript,
        "options": {
            "length": "brief",
            "focus_areas": ["AI", "machine learning", "neural networks"]
        }
    }

    try:
        print("📤 Testing direct Gemini summarization...")
        response = requests.post(
            "http://localhost:8000/api/models/summarize",
            json=request_data,
            headers={"Content-Type": "application/json"},
            timeout=60
        )

        print(f"📨 Response status: {response.status_code}")

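        # The summarize endpoint is expected to return JSON with 'summary',
        # 'key_points', 'cost_data', and 'processing_metadata' fields; each is
        # checked below before printing so a partial response does not error.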
        if response.status_code == 200:
            result = response.json()
            print("✅ SUCCESS! Gemini summarization worked!")

            # Display results
            if 'summary' in result:
                summary = result['summary']
                print(f"\n📋 Summary: {summary}")

            if 'key_points' in result:
                key_points = result.get('key_points', [])
                print(f"\n🔑 Key Points ({len(key_points)} found):")
                for i, point in enumerate(key_points, 1):
                    print(f"   {i}. {point}")

            if 'cost_data' in result:
                cost_data = result['cost_data']
                cost = cost_data.get('total_cost_usd', 0)
                input_tokens = cost_data.get('input_tokens', 0)
                output_tokens = cost_data.get('output_tokens', 0)
                print(f"\n💰 Cost: ${cost:.4f}")
                print(f"📄 Input tokens: {input_tokens:,}")
                print(f"📝 Output tokens: {output_tokens:,}")

            if 'processing_metadata' in result:
                metadata = result['processing_metadata']
                model_used = metadata.get('model_used', 'N/A')
                processing_time = metadata.get('processing_time_seconds', 0)
                print(f"\n🤖 Model: {model_used}")
                print(f"⚡ Time: {processing_time:.2f}s")

                if 'large_context_advantage' in metadata:
                    advantage = metadata['large_context_advantage']
                    print(f"🚀 Context advantage: {advantage}")

            return True

        else:
            print(f"❌ Failed with status {response.status_code}")
            try:
                error = response.json()
                print(f"Error: {error}")
            except ValueError:
                # Body was not valid JSON; fall back to the raw response text
                print(f"Error text: {response.text}")

            return False

    except Exception as e:
        print(f"❌ Exception: {e}")
        return False

def test_health_check():
    """Quick health check."""
    print("\n=== Health Check ===")

    try:
        # A timeout keeps the check from hanging if the backend is unreachable
        response = requests.get("http://localhost:8000/health", timeout=10)
        if response.status_code == 200:
            print("✅ Backend is healthy")
            return True
        else:
            print(f"❌ Health check failed: {response.status_code}")
            return False
    except Exception as e:
        print(f"❌ Health check error: {e}")
        return False

if __name__ == "__main__":
    if test_health_check():
        success = test_direct_gemini()

        if success:
            print("\n🎉 ✅ GEMINI DIRECT TEST PASSED!")
            print("✅ Gemini 1.5 Pro is working correctly")
            print("✅ Ready for YouTube video processing")
        else:
            print("\n❌ Direct Gemini test failed")
    else:
        print("❌ Backend not available")

    print("\n" + "=" * 40)