youtube-summarizer/backend/api/transcripts_stub.py

"""
Simple stub for transcripts endpoints to prevent frontend errors.
This provides basic responses to prevent the infinite loading loop.
"""
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import List, Dict, Any, Optional

router = APIRouter(prefix="/api/transcripts", tags=["transcripts"])

# YouTube Auth router for missing endpoints
youtube_auth_router = APIRouter(prefix="/api/youtube-auth", tags=["youtube-auth"])


class EstimateRequest(BaseModel):
    video_url: str
    transcript_source: str = "youtube"
    video_duration_seconds: Optional[int] = None


class EstimateResponse(BaseModel):
    estimated_time_seconds: int
    estimated_size_mb: float
    confidence: str
    status: str = "available"
    transcript_source: str


class ExtractRequest(BaseModel):
    video_id: str
    language_preference: str = "en"
    include_metadata: bool = True


class JobResponse(BaseModel):
    job_id: str
    status: str
    message: str
    estimated_completion_time: Optional[int] = None


class JobStatusResponse(BaseModel):
    job_id: str
    status: str
    progress_percentage: int
    current_message: str
    result: Optional[Dict[str, Any]] = None
    error: Optional[str] = None


@router.post("/dual/estimate", response_model=EstimateResponse)
async def get_processing_estimate(request: EstimateRequest):
    """
    Provide a simple estimate response to prevent frontend errors.
    This is a stub endpoint to stop the infinite loading loop.
    """
    # Simple estimates based on transcript source
    if request.transcript_source == "youtube":
        estimated_time = 5  # 5 seconds for YouTube captions
        estimated_size = 0.5  # 500KB typical size
        confidence = "high"
    elif request.transcript_source == "whisper":
        estimated_time = 120  # 2 minutes for Whisper processing
        estimated_size = 2.0  # 2MB typical size
        confidence = "high"
    else:
        estimated_time = 10  # 10 seconds for both
        estimated_size = 1.0  # 1MB typical size
        confidence = "medium"

    return EstimateResponse(
        transcript_source=request.transcript_source,
        estimated_time_seconds=estimated_time,
        estimated_size_mb=estimated_size,
        confidence=confidence,
        status="available",
    )
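
# Example call (sketch; assumes the backend is served locally on port 8000):
#   curl -X POST http://localhost:8000/api/transcripts/dual/estimate \
#     -H "Content-Type: application/json" \
#     -d '{"video_url": "https://www.youtube.com/watch?v=DCquejfz04A", "transcript_source": "whisper"}'
# Stub response for this request:
#   {"estimated_time_seconds": 120, "estimated_size_mb": 2.0, "confidence": "high",
#    "status": "available", "transcript_source": "whisper"}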


@router.post("/extract", response_model=JobResponse)
async def extract_transcript(request: ExtractRequest):
    """
    Start a transcript extraction job.
    This is a stub endpoint that simulates starting a transcript extraction job.
    """
    import uuid

    job_id = str(uuid.uuid4())
    return JobResponse(
        job_id=job_id,
        status="started",
        message=f"Transcript extraction started for video {request.video_id}",
        estimated_completion_time=30,  # 30 seconds estimated
    )
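
# Example call (sketch; assumes the backend is served locally on port 8000):
#   curl -X POST http://localhost:8000/api/transcripts/extract \
#     -H "Content-Type: application/json" \
#     -d '{"video_id": "DCquejfz04A"}'
# The stub returns a fresh UUID job_id with status "started"; poll
# GET /api/transcripts/jobs/{job_id} (below) to pick up the mock result.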


@router.get("/jobs/{job_id}", response_model=JobStatusResponse)
async def get_extraction_status(job_id: str):
    """
    Get the status of a transcript extraction job.
    This is a stub endpoint that simulates job completion.
    """
    # For demo purposes, always return a completed job with mock transcript
    mock_transcript = [
        {"start": 0.0, "text": "Welcome to this video about artificial intelligence."},
        {"start": 3.2, "text": "Today we'll explore the fascinating world of machine learning."},
        {"start": 7.8, "text": "We'll cover neural networks, deep learning, and practical applications."},
        {"start": 12.1, "text": "This technology is transforming industries across the globe."},
    ]

    return JobStatusResponse(
        job_id=job_id,
        status="completed",
        progress_percentage=100,
        current_message="Transcript extraction completed successfully",
        result={
            "video_id": "DCquejfz04A",
            "transcript": mock_transcript,
            "metadata": {
                "title": "Sample Video Title",
                "duration": "15.5 seconds",
                "language": "en",
                "word_count": 25,
                "extraction_method": "youtube_captions",
                "processing_time_seconds": 2.3,
                "estimated_reading_time": 30,
            },
        },
        error=None,
    )
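
# Example call (sketch; assumes the backend is served locally on port 8000):
#   curl http://localhost:8000/api/transcripts/jobs/<any-job-id>
# The stub accepts any job_id and always reports status "completed" at 100%
# with the hard-coded mock transcript above.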


@youtube_auth_router.get("/status")
async def get_youtube_auth_status():
    """
    Stub endpoint for YouTube authentication status.
    Returns guest mode status to prevent 404 errors.
    """
    return {
        "authenticated": False,
        "user": None,
        "status": "guest_mode",
        "message": "Using guest mode - no authentication required",
    }
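

# Wiring sketch (assumption: the main FastAPI app object is named `app` and the
# import path matches the backend layout; adjust both to fit the project):
#
#   from fastapi import FastAPI
#   from api.transcripts_stub import router, youtube_auth_router
#
#   app = FastAPI()
#   app.include_router(router)
#   app.include_router(youtube_auth_router)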