Fix AI caller hanging on 'thinking...' indefinitely

- Add 30s timeout to all frontend fetch calls (safeFetch)
- Add 20s asyncio.timeout around lock+LLM in chat, ai-respond, auto-respond
- Reduce OpenRouter timeout from 60s to 25s
- Reduce Inworld TTS timeout from 60s to 25s
- Return graceful fallback responses on timeout instead of hanging

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-06 21:16:15 -07:00
parent cac80a4b52
commit b3fb3b1127
4 changed files with 83 additions and 59 deletions

View File

@@ -114,7 +114,7 @@ class LLMService:
         """Call OpenRouter API with retry"""
         for attempt in range(2):  # Try twice
             try:
-                async with httpx.AsyncClient(timeout=60.0) as client:
+                async with httpx.AsyncClient(timeout=25.0) as client:
                     response = await client.post(
                         "https://openrouter.ai/api/v1/chat/completions",
                         headers={

View File

@@ -600,7 +600,7 @@ async def generate_speech_inworld(text: str, voice_id: str) -> tuple[np.ndarray,
         },
     }
-    async with httpx.AsyncClient(timeout=60.0) as client:
+    async with httpx.AsyncClient(timeout=25.0) as client:
         response = await client.post(url, json=payload, headers=headers)
         response.raise_for_status()
         data = response.json()