Remove news/research from caller prompts — was causing timeouts

Research results were bloating the system prompt, making LLM calls
slower and hitting the 20s timeout. Callers don't need news awareness
to have good conversations.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-06 21:44:22 -07:00
parent e45ba2617a
commit 164f8fbc6e

View File

@@ -921,8 +921,8 @@ async def start_call(caller_key: str):
     session.start_call(caller_key)
     caller = session.caller  # This generates the background if needed
-    if not session.news_headlines:
-        asyncio.create_task(_fetch_session_headlines())
+    # if not session.news_headlines:
+    #     asyncio.create_task(_fetch_session_headlines())
     return {
         "status": "connected",
@@ -1077,7 +1077,7 @@ async def chat(request: ChatRequest):
     epoch = _session_epoch
     session.add_message("user", request.text)
-    session._research_task = asyncio.create_task(_background_research(request.text))
+    # session._research_task = asyncio.create_task(_background_research(request.text))
     try:
         async with asyncio.timeout(20):
@@ -1091,9 +1091,7 @@
             # Include conversation summary and show history for context
             conversation_summary = session.get_conversation_summary()
             show_history = session.get_show_history()
-            news_ctx, research_ctx = _build_news_context()
-            system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history,
-                                              news_ctx, research_ctx)
+            system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history)
             messages = _normalize_messages_for_llm(session.conversation[-10:])
             response = await llm_service.generate(
@@ -1622,9 +1620,7 @@ async def _trigger_ai_auto_respond(accumulated_text: str):
     conversation_summary = session.get_conversation_summary()
     show_history = session.get_show_history()
-    news_ctx, research_ctx = _build_news_context()
-    system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history,
-                                      news_ctx, research_ctx)
+    system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history)
     messages = _normalize_messages_for_llm(session.conversation[-10:])
     response = await llm_service.generate(
@@ -1674,7 +1670,7 @@ async def _trigger_ai_auto_respond(accumulated_text: str):
     broadcast_event("ai_done")
-    session._research_task = asyncio.create_task(_background_research(accumulated_text))
+    # session._research_task = asyncio.create_task(_background_research(accumulated_text))
     # Also stream to active real caller so they hear the AI
     if session.active_real_caller:
@@ -1702,9 +1698,7 @@ async def ai_respond():
     conversation_summary = session.get_conversation_summary()
     show_history = session.get_show_history()
-    news_ctx, research_ctx = _build_news_context()
-    system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history,
-                                      news_ctx, research_ctx)
+    system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history)
     messages = _normalize_messages_for_llm(session.conversation[-10:])
     response = await llm_service.generate(