|
1 | 1 | from fastapi import APIRouter, Request, HTTPException
|
| 2 | +from fastapi.responses import StreamingResponse |
2 | 3 | from dotenv import load_dotenv
|
3 | 4 | from app.services.github_service import GitHubService
|
4 | 5 | from app.services.o3_mini_openrouter_service import OpenRouterO3Service
|
|
12 | 13 | from pydantic import BaseModel
|
13 | 14 | from functools import lru_cache
|
14 | 15 | import re
|
| 16 | +import json |
| 17 | +import asyncio |
15 | 18 |
|
16 | 19 | # from app.services.claude_service import ClaudeService
|
17 | 20 | # from app.core.limiter import limiter
|
@@ -49,6 +52,7 @@ class ApiRequest(BaseModel):
|
49 | 52 | github_pat: str | None = None
|
50 | 53 |
|
51 | 54 |
|
| 55 | +# OLD NON STREAMING VERSION |
52 | 56 | @router.post("")
|
53 | 57 | # @limiter.limit("1/minute;5/day") # TEMP: disable rate limit for growth??
|
54 | 58 | async def generate(request: Request, body: ApiRequest):
|
@@ -268,3 +272,149 @@ def replace_path(match):
|
268 | 272 | # Match click events: click ComponentName "path/to/something"
|
269 | 273 | click_pattern = r'click ([^\s"]+)\s+"([^"]+)"'
|
270 | 274 | return re.sub(click_pattern, replace_path, diagram)
|
| 275 | + |
| 276 | + |
def _sse_event(payload: dict) -> str:
    """Serialize *payload* as one Server-Sent-Events data frame.

    Centralizes the ``data: <json>\n\n`` framing that was previously
    hand-built in a dozen f-strings. Building the dict first and passing it
    to ``json.dumps`` also avoids multi-line / backslash-containing
    replacement fields inside f-strings, which are SyntaxErrors before
    Python 3.12 (PEP 701).
    """
    return f"data: {json.dumps(payload)}\n\n"


@router.post("/stream")
async def generate_stream(request: Request, body: ApiRequest):
    """Stream the diagram-generation pipeline to the client over SSE.

    Runs the same three o3-mini phases as the non-streaming ``generate``
    endpoint (explanation -> component mapping -> Mermaid diagram), but
    relays each model chunk and phase transition to the client as it
    happens via a ``text/event-stream`` response.

    Args:
        request: The incoming FastAPI request (unused directly; kept for
            parity with the non-streaming endpoint and future rate limiting).
        body: Validated payload with ``username``, ``repo``, optional
            ``instructions``, optional ``api_key`` and ``github_pat``.

    Returns:
        A ``StreamingResponse`` of SSE frames, or a plain ``{"error": ...}``
        dict when validation fails before streaming starts.
    """
    try:
        # Initial validation checks — fail fast before opening the stream.
        if len(body.instructions) > 1000:
            return {"error": "Instructions exceed maximum length of 1000 characters"}

        if body.repo in [
            "fastapi",
            "streamlit",
            "flask",
            "api-analytics",
            "monkeytype",
        ]:
            return {"error": "Example repos cannot be regenerated"}

        async def event_generator():
            try:
                # Get cached github data (memoized via lru_cache upstream).
                github_data = get_cached_github_data(
                    body.username, body.repo, body.github_pat
                )
                default_branch = github_data["default_branch"]
                file_tree = github_data["file_tree"]
                readme = github_data["readme"]

                # Send initial status.
                yield _sse_event(
                    {"status": "started", "message": "Starting generation process..."}
                )
                await asyncio.sleep(0.1)

                # Token count check — reject repos too large for the model
                # (or for our wallet, unless the caller brings their own key).
                combined_content = f"{file_tree}\n{readme}"
                token_count = o3_service.count_tokens(combined_content)

                if 50000 < token_count < 195000 and not body.api_key:
                    yield _sse_event(
                        {
                            "error": (
                                "File tree and README combined exceeds token limit (50,000). "
                                f"Current size: {token_count} tokens. This GitHub repository is "
                                "too large for my wallet, but you can continue by providing your "
                                "own OpenRouter API key."
                            )
                        }
                    )
                    return
                elif token_count > 195000:
                    yield _sse_event(
                        {
                            "error": (
                                "Repository is too large (>195k tokens) for analysis. "
                                "OpenAI o3-mini's max context length is 200k tokens. "
                                f"Current size: {token_count} tokens."
                            )
                        }
                    )
                    return

                # Prepare prompts; append the user-instruction addendum only
                # when custom instructions were supplied.
                first_system_prompt = SYSTEM_FIRST_PROMPT
                third_system_prompt = SYSTEM_THIRD_PROMPT
                if body.instructions:
                    first_system_prompt = (
                        first_system_prompt
                        + "\n"
                        + ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT
                    )
                    third_system_prompt = (
                        third_system_prompt
                        + "\n"
                        + ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT
                    )

                # Phase 1: Get explanation.
                yield _sse_event(
                    {
                        "status": "explanation_sent",
                        "message": "Sending explanation request to o3-mini...",
                    }
                )
                await asyncio.sleep(0.1)
                yield _sse_event(
                    {
                        "status": "explanation",
                        "message": "Analyzing repository structure...",
                    }
                )
                explanation = ""
                async for chunk in o3_service.call_o3_api_stream(
                    system_prompt=first_system_prompt,
                    data={
                        "file_tree": file_tree,
                        "readme": readme,
                        "instructions": body.instructions,
                    },
                    api_key=body.api_key,
                    reasoning_effort="medium",
                ):
                    explanation += chunk
                    yield _sse_event({"status": "explanation_chunk", "chunk": chunk})

                if "BAD_INSTRUCTIONS" in explanation:
                    yield _sse_event(
                        {"error": "Invalid or unclear instructions provided"}
                    )
                    return

                # Phase 2: Get component mapping.
                yield _sse_event(
                    {
                        "status": "mapping_sent",
                        "message": "Sending component mapping request to o3-mini...",
                    }
                )
                await asyncio.sleep(0.1)
                yield _sse_event(
                    {"status": "mapping", "message": "Creating component mapping..."}
                )
                full_second_response = ""
                async for chunk in o3_service.call_o3_api_stream(
                    system_prompt=SYSTEM_SECOND_PROMPT,
                    data={"explanation": explanation, "file_tree": file_tree},
                    api_key=body.api_key,
                    reasoning_effort="medium",
                ):
                    full_second_response += chunk
                    yield _sse_event({"status": "mapping_chunk", "chunk": chunk})

                # Extract the <component_mapping> section. Guard both find()
                # results: the previous unguarded slice produced garbage
                # (index -1) when the model omitted a tag.
                start_tag = "<component_mapping>"
                end_tag = "</component_mapping>"
                start_idx = full_second_response.find(start_tag)
                end_idx = full_second_response.find(end_tag)
                if start_idx != -1 and end_idx != -1:
                    component_mapping_text = full_second_response[start_idx:end_idx]
                else:
                    component_mapping_text = ""

                # Phase 3: Generate Mermaid diagram.
                yield _sse_event(
                    {
                        "status": "diagram_sent",
                        "message": "Sending diagram generation request to o3-mini...",
                    }
                )
                await asyncio.sleep(0.1)
                yield _sse_event(
                    {"status": "diagram", "message": "Generating diagram..."}
                )
                mermaid_code = ""
                async for chunk in o3_service.call_o3_api_stream(
                    system_prompt=third_system_prompt,
                    data={
                        "explanation": explanation,
                        "component_mapping": component_mapping_text,
                        "instructions": body.instructions,
                    },
                    api_key=body.api_key,
                    reasoning_effort="medium",
                ):
                    mermaid_code += chunk
                    yield _sse_event({"status": "diagram_chunk", "chunk": chunk})

                # Process final diagram: strip markdown fencing the model
                # sometimes wraps around the Mermaid source.
                mermaid_code = mermaid_code.replace("```mermaid", "").replace("```", "")
                if "BAD_INSTRUCTIONS" in mermaid_code:
                    yield _sse_event(
                        {"error": "Invalid or unclear instructions provided"}
                    )
                    return

                processed_diagram = process_click_events(
                    mermaid_code, body.username, body.repo, default_branch
                )

                # Send final result. Previously this was a single f-string
                # whose replacement field spanned multiple lines — a
                # SyntaxError on Python < 3.12.
                yield _sse_event(
                    {
                        "status": "complete",
                        "diagram": processed_diagram,
                        "explanation": explanation,
                        "mapping": component_mapping_text,
                    }
                )

            except Exception as e:
                # Surface any failure to the client as a terminal SSE error
                # frame instead of silently dropping the connection.
                yield _sse_event({"error": str(e)})

        return StreamingResponse(event_generator(), media_type="text/event-stream")
    except Exception as e:
        return {"error": str(e)}
0 commit comments