Add show improvement features: crossfade, emotions, returning callers, transcripts, screening

- Music crossfade: smooth 3-second blend between tracks instead of hard stop/start
- Emotional detection: analyze host mood from recent messages so callers adapt tone
- AI caller summaries: generate call summaries with timestamps for show history
- Returning callers: persist regular callers across sessions with call history
- Session export: generate transcripts with speaker labels and chapter markers
- Caller screening: AI pre-screens phone callers to get name and topic while queued

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-07 02:43:01 -07:00
parent de5577e582
commit 356bf145b8
13 changed files with 3736 additions and 40 deletions

View File

@@ -25,6 +25,7 @@ from .services.llm import llm_service
from .services.tts import generate_speech
from .services.audio import audio_service
from .services.news import news_service, extract_keywords, STOP_WORDS
from .services.regulars import regular_caller_service
app = FastAPI(title="AI Radio Show")
@@ -115,7 +116,8 @@ CALLER_BASES = {
def _randomize_callers():
"""Assign random names and voices to callers, unique per gender."""
"""Assign random names and voices to callers, unique per gender.
Overrides 2-3 slots with returning regulars when available."""
num_m = sum(1 for c in CALLER_BASES.values() if c["gender"] == "male")
num_f = sum(1 for c in CALLER_BASES.values() if c["gender"] == "female")
males = random.sample(MALE_NAMES, num_m)
@@ -125,6 +127,8 @@ def _randomize_callers():
f_voices = random.sample(female_pool, min(num_f, len(female_pool)))
mi, fi = 0, 0
for base in CALLER_BASES.values():
base["returning"] = False
base["regular_id"] = None
if base["gender"] == "male":
base["name"] = males[mi]
base["voice"] = m_voices[mi]
@@ -134,6 +138,32 @@ def _randomize_callers():
base["voice"] = f_voices[fi]
fi += 1
# Override 2-3 random slots with returning callers
try:
returning = regular_caller_service.get_returning_callers(random.randint(2, 3))
if returning:
keys_by_gender = {"male": [], "female": []}
for k, v in CALLER_BASES.items():
keys_by_gender[v["gender"]].append(k)
for regular in returning:
gender = regular["gender"]
candidates = keys_by_gender.get(gender, [])
if not candidates:
continue
key = random.choice(candidates)
candidates.remove(key)
base = CALLER_BASES[key]
base["name"] = regular["name"]
base["returning"] = True
base["regular_id"] = regular["id"]
# Keep the randomly assigned voice — regulars sound different each time
if returning:
names = [r["name"] for r in returning]
print(f"[Regulars] Injected returning callers: {', '.join(names)}")
except Exception as e:
print(f"[Regulars] Failed to inject returning callers: {e}")
_randomize_callers() # Initial assignment
# Background components for dynamic generation
@@ -1239,10 +1269,68 @@ def pick_location() -> str:
return random.choice(LOCATIONS_OUT_OF_STATE)
def _generate_returning_caller_background(base: dict) -> str:
    """Generate a background prompt section for a returning regular caller.

    Looks up the regular by ``base["regular_id"]``; falls back to a fresh
    one-off background when the id is unknown. Reuses the stored identity
    (gender/age/job/location/traits) but re-rolls the transient personality
    layers each session, and appends up to the last three previous-call
    summaries so the caller can reference earlier appearances on air.
    """
    regular_id = base.get("regular_id")
    regulars = regular_caller_service.get_regulars()
    regular = next((r for r in regulars if r["id"] == regular_id), None)
    if not regular:
        # Stale or missing regular record — treat as a brand-new caller.
        return generate_caller_background(base)
    # Stable identity carried over from previous sessions.
    gender = regular["gender"]
    age = regular["age"]
    job = regular["job"]
    location = regular["location"]
    traits = regular.get("personality_traits", [])
    # Build previous calls section (last three summaries only).
    prev_calls = regular.get("call_history", [])
    prev_section = ""
    if prev_calls:
        lines = [f"- {c['summary']}" for c in prev_calls[-3:]]
        prev_section = "\nPREVIOUS CALLS:\n" + "\n".join(lines)
        prev_section += "\nYou're calling back with an update — something has changed since last time. Reference your previous call(s) naturally."
    # Reuse standard personality layers — re-randomized every session, so a
    # regular keeps their identity but not their exact quirks/interests.
    interest1, interest2 = random.sample(INTERESTS, 2)
    quirk1, quirk2 = random.sample(QUIRKS, 2)
    people_pool = PEOPLE_MALE if gender == "male" else PEOPLE_FEMALE
    person1, person2 = random.sample(people_pool, 2)
    tic1, tic2 = random.sample(VERBAL_TICS, 2)
    arc = random.choice(EMOTIONAL_ARCS)
    vehicle = random.choice(VEHICLES)
    having = random.choice(HAVING_RIGHT_NOW)
    time_ctx = _get_time_context()
    moon = _get_moon_phase()
    season_ctx = _get_seasonal_context()
    trait_str = ", ".join(traits) if traits else "a regular caller"
    parts = [
        f"{age}, {job} {location}. Returning caller — {trait_str}.",
        f"{interest1.capitalize()}, {interest2}.",
        f"{quirk1.capitalize()}, {quirk2}.",
        f"\nRIGHT NOW: {time_ctx} Moon: {moon}.",
        f"\nSEASON: {season_ctx}",
        f"\nPEOPLE IN THEIR LIFE: {person1.capitalize()}. {person2.capitalize()}. Use their names when talking about them.",
        f"\nDRIVES: {vehicle.capitalize()}.",
        f"\nHAVING RIGHT NOW: {having}",
        f"\nVERBAL HABITS: Tends to say \"{tic1}\" and \"{tic2}\" — use these naturally in conversation.",
        f"\nEMOTIONAL ARC: {arc}",
        f"\nRELATIONSHIP TO THE SHOW: Has called before. Comfortable on air. Knows Luke a bit. Might reference their last call.",
        prev_section,
    ]
    # The first three parts are space-joined into one summary block; the rest
    # already begin with "\n" and concatenate as their own lines.
    return " ".join(parts[:3]) + "".join(parts[3:])
def generate_caller_background(base: dict) -> str:
"""Generate a unique background for a caller (sync, no research).
~30% of callers are 'topic callers' who call about something interesting
instead of a personal problem. Includes full personality layers for realism."""
if base.get("returning") and base.get("regular_id"):
return _generate_returning_caller_background(base)
gender = base["gender"]
age = random.randint(*base["age_range"])
jobs = JOBS_MALE if gender == "male" else JOBS_FEMALE
@@ -1491,8 +1579,58 @@ async def enrich_caller_background(background: str) -> str:
return background
def detect_host_mood(messages: list[dict]) -> str:
"""Analyze recent host messages to detect mood signals for caller adaptation."""
host_msgs = [m["content"] for m in messages if m.get("role") in ("user", "host")][-5:]
if not host_msgs:
return ""
signals = []
# Check average word count — short responses suggest dismissiveness
avg_words = sum(len(m.split()) for m in host_msgs) / len(host_msgs)
if avg_words < 8:
signals.append("The host is giving short responses — they might be losing interest, testing you, or waiting for you to bring something real. Don't ramble. Get to the point or change the subject.")
# Pushback patterns
pushback_phrases = ["i don't think", "that's not", "come on", "really?", "i disagree",
"that doesn't", "are you sure", "i don't buy", "no way", "but that's",
"hold on", "wait a minute", "let's be honest"]
pushback_count = sum(1 for m in host_msgs for p in pushback_phrases if p in m.lower())
if pushback_count >= 2:
signals.append("The host is pushing back — they're challenging you. Don't fold immediately. Defend your position or concede specifically, not generically.")
# Supportive patterns
supportive_phrases = ["i hear you", "that makes sense", "i get it", "that's real",
"i feel you", "you're right", "absolutely", "exactly", "good for you",
"i respect that", "that took guts", "i'm glad you"]
supportive_count = sum(1 for m in host_msgs for p in supportive_phrases if p in m.lower())
if supportive_count >= 2:
signals.append("The host is being supportive — they're with you. You can go deeper. Share something you've been holding back.")
# Joking patterns
joke_indicators = ["haha", "lmao", "lol", "that's hilarious", "no way", "you're killing me",
"shut up", "get out", "are you serious", "you're joking"]
joke_count = sum(1 for m in host_msgs for p in joke_indicators if p in m.lower())
if joke_count >= 2:
signals.append("The host is in a playful mood — joking around. You can joke back, lean into the humor, but you can also use it as a door to something real.")
# Probing — lots of questions
question_count = sum(m.count("?") for m in host_msgs)
if question_count >= 3:
signals.append("The host is asking a lot of questions — they're digging. Give them real answers. Don't deflect.")
if not signals:
return ""
# Cap at 2 signals
signals = signals[:2]
return "\nEMOTIONAL READ ON THE HOST:\n" + "\n".join(f"- {s}" for s in signals) + "\n"
def get_caller_prompt(caller: dict, conversation_summary: str = "", show_history: str = "",
news_context: str = "", research_context: str = "") -> str:
news_context: str = "", research_context: str = "",
emotional_read: str = "") -> str:
"""Generate a natural system prompt for a caller"""
context = ""
if conversation_summary:
@@ -1519,7 +1657,7 @@ Continue naturally. Don't repeat yourself.
return f"""You're {caller['name']}, calling a late-night radio show called "Luke at the Roost." It's late. You trust this host.
{caller['vibe']}
{history}{context}{world_context}
{history}{context}{world_context}{emotional_read}
HOW TO TALK:
- Sound like a real person on the phone, not an essay. This is a conversation, not a monologue.
- VARY YOUR LENGTH. Sometimes one sentence. Sometimes two or three. Match the moment.
@@ -1607,6 +1745,8 @@ class CallRecord:
caller_name: str # "Tony" or "Caller #3"
summary: str # LLM-generated summary after hangup
transcript: list[dict] = field(default_factory=list)
started_at: float = 0.0
ended_at: float = 0.0
class Session:
@@ -1616,6 +1756,7 @@ class Session:
self.conversation: list[dict] = []
self.caller_backgrounds: dict[str, str] = {} # Generated backgrounds for this session
self.call_history: list[CallRecord] = []
self._call_started_at: float = 0.0
self.active_real_caller: dict | None = None
self.ai_respond_mode: str = "manual" # "manual" or "auto"
self.auto_followup: bool = False
@@ -1626,13 +1767,14 @@ class Session:
def start_call(self, caller_key: str):
self.current_caller_key = caller_key
self.conversation = []
self._call_started_at = time.time()
def end_call(self):
self.current_caller_key = None
self.conversation = []
def add_message(self, role: str, content: str):
self.conversation.append({"role": role, "content": content})
self.conversation.append({"role": role, "content": content, "timestamp": time.time()})
def get_caller_background(self, caller_key: str) -> str:
"""Get or generate background for a caller in this session"""
@@ -1977,7 +2119,7 @@ async def get_callers():
"""Get list of available callers"""
return {
"callers": [
{"key": k, "name": v["name"]}
{"key": k, "name": v["name"], "returning": v.get("returning", False)}
for k, v in CALLER_BASES.items()
],
"current": session.current_caller_key,
@@ -1985,6 +2127,12 @@ async def get_callers():
}
@app.get("/api/regulars")
async def get_regulars():
"""Get list of regular callers"""
return {"regulars": regular_caller_service.get_regulars()}
@app.post("/api/session/reset")
async def reset_session():
"""Reset session - all callers get fresh backgrounds"""
@@ -2037,6 +2185,9 @@ async def hangup():
session._research_task = None
caller_name = session.caller["name"] if session.caller else None
caller_key = session.current_caller_key
conversation_snapshot = list(session.conversation)
call_started = getattr(session, '_call_started_at', 0.0)
session.end_call()
# Play hangup sound in background so response returns immediately
@@ -2044,9 +2195,74 @@ async def hangup():
if hangup_sound.exists():
threading.Thread(target=audio_service.play_sfx, args=(str(hangup_sound),), daemon=True).start()
# Generate summary for AI caller in background
if caller_name and conversation_snapshot:
asyncio.create_task(_summarize_ai_call(caller_key, caller_name, conversation_snapshot, call_started))
return {"status": "disconnected", "caller": caller_name}
async def _summarize_ai_call(caller_key: str, caller_name: str, conversation: list[dict], started_at: float):
    """Background task: summarize an AI caller conversation and store it in history.

    Generates a 1-2 sentence LLM summary of the call, appends a CallRecord to
    ``session.call_history`` with start/end timestamps, then either updates the
    regular-caller store (for returning regulars) or — with a 20% chance for
    substantial first-time calls — promotes the caller to a regular.

    Args:
        caller_key: Key into CALLER_BASES for this caller slot (may be None).
        caller_name: Display name of the caller.
        conversation: Snapshot of the call's message dicts (role/content).
        started_at: Epoch seconds when the call began (0.0 if unknown).
    """
    ended_at = time.time()
    summary = ""
    if conversation:
        transcript_text = "\n".join(
            f"{msg['role']}: {msg['content']}" for msg in conversation
        )
        try:
            summary = await llm_service.generate(
                messages=[{"role": "user", "content": f"Summarize this radio show call in 1-2 sentences:\n{transcript_text}"}],
                system_prompt="You summarize radio show conversations concisely. Focus on what the caller talked about and any emotional moments.",
            )
        except Exception as e:
            # Best-effort: a bland fallback summary beats losing the record.
            print(f"[AI Summary] Failed to generate summary: {e}")
            summary = f"{caller_name} called in."

    session.call_history.append(CallRecord(
        caller_type="ai",
        caller_name=caller_name,
        summary=summary,
        transcript=conversation,
        started_at=started_at,
        ended_at=ended_at,
    ))
    print(f"[AI Summary] {caller_name} call summarized: {summary[:80]}...")

    # Returning caller promotion/update logic — never let it break the task.
    try:
        base = CALLER_BASES.get(caller_key) if caller_key else None
        if base and summary:
            if base.get("returning") and base.get("regular_id"):
                # Known regular: append this call's summary to their history.
                regular_caller_service.update_after_call(base["regular_id"], summary)
            elif len(conversation) >= 6 and random.random() < 0.20:
                # 20% chance to promote a first-timer with 6+ messages.
                bg = session.caller_backgrounds.get(caller_key, "")
                traits = []
                for label in ["QUIRK", "STRONG OPINION", "SECRET SIDE", "FOOD OPINION"]:
                    for line in bg.split("\n"):
                        if label in line:
                            traits.append(line.split(":", 1)[-1].strip()[:80])
                            break
                # Extract job and location from the background's first sentence,
                # which backgrounds format as "<age>, <job> in <location>. ..."
                # — presumably; verify against generate_caller_background.
                first_line = bg.split(".")[0] if bg else ""
                parts = first_line.split(",", 1)
                job_loc = parts[1].strip() if len(parts) > 1 else ""
                # BUG FIX: the previous code branched on isinstance(job_parts,
                # tuple), but str.rsplit returns a *list*, so the " in " branch
                # always produced location="unknown" and the no-" in " branch
                # produced "in unknown". Branch on the delimiter directly.
                if " in " in job_loc:
                    job_part, loc_tail = job_loc.rsplit(" in ", 1)
                    job = job_part.strip()
                    location = "in " + loc_tail.strip()
                else:
                    job = job_loc.strip()
                    location = "unknown"
                regular_caller_service.add_regular(
                    name=caller_name,
                    gender=base.get("gender", "male"),
                    age=random.randint(*base.get("age_range", (30, 50))),
                    job=job,
                    location=location,
                    personality_traits=traits[:4],
                    first_call_summary=summary,
                )
    except Exception as e:
        print(f"[Regulars] Promotion logic error: {e}")
# --- Chat & TTS Endpoints ---
import re
@@ -2174,7 +2390,8 @@ async def chat(request: ChatRequest):
conversation_summary = session.get_conversation_summary()
show_history = session.get_show_history()
system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history)
mood = detect_host_mood(session.conversation)
system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history, emotional_read=mood)
messages = _normalize_messages_for_llm(session.conversation[-10:])
response = await llm_service.generate(
@@ -2276,11 +2493,14 @@ async def get_music():
@app.post("/api/music/play")
async def play_music(request: MusicRequest):
"""Load and play a music track"""
"""Load and play a music track, crossfading if already playing"""
track_path = settings.music_dir / request.track
if not track_path.exists():
raise HTTPException(404, "Track not found")
if audio_service.is_music_playing():
audio_service.crossfade_to(str(track_path))
else:
audio_service.load_music(str(track_path))
audio_service.play_music()
return {"status": "playing", "track": request.track}
@@ -2352,6 +2572,9 @@ async def play_ad(request: MusicRequest):
if not ad_path.exists():
raise HTTPException(404, "Ad not found")
if audio_service._music_playing:
audio_service.stop_music(fade_duration=1.0)
await asyncio.sleep(1.1)
audio_service.play_ad(str(ad_path))
return {"status": "playing", "track": request.track}
@@ -2393,6 +2616,126 @@ async def update_settings(data: dict):
return llm_service.get_settings()
# --- Caller Screening ---
SCREENING_PROMPT = """You are a friendly, brief phone screener for "Luke at the Roost" radio show.
Your job: Get the caller's first name and what they want to talk about. That's it.
Rules:
- Be warm but brief (1-2 sentences per response)
- First ask their name, then ask what they want to talk about
- After you have both, say something like "Great, sit tight and we'll get you on with Luke!"
- Never pretend to be Luke or the host
- Keep it casual and conversational
- If they're hard to understand, ask them to repeat"""
_screening_audio_buffers: dict[str, bytearray] = {}
async def _start_screening_greeting(caller_id: str):
    """Kick off the screening conversation for a freshly queued phone caller.

    Waits briefly for the SignalWire stream to stabilize, then initializes the
    screening state, records the greeting as the first screener turn, and
    streams the TTS audio (voice "Sarah") down the caller's websocket.
    """
    await asyncio.sleep(2)  # Wait for stream to stabilize
    ws = caller_service._websockets.get(caller_id)
    if not ws:
        # Caller hung up (or never fully connected) while we were waiting.
        return
    caller_service.start_screening(caller_id)
    greeting = "Hey there! Thanks for calling Luke at the Roost. What's your name?"
    caller_service.update_screening(caller_id, screener_text=greeting)
    try:
        audio_bytes = await generate_speech(greeting, "Sarah", "none")
        if audio_bytes:
            # 24000 presumably matches generate_speech's output sample rate —
            # TODO confirm against the TTS service.
            await caller_service.stream_audio_to_caller(caller_id, audio_bytes, 24000)
    except Exception as e:
        # Greeting is best-effort; screening can still proceed from caller audio.
        print(f"[Screening] Greeting TTS failed: {e}")
async def _handle_screening_audio(caller_id: str, pcm_data: bytes, sample_rate: int):
    """Process one chunk of audio from a queued caller for the screening chat.

    Pipeline: transcribe the caller's speech, append it to the screening
    conversation, generate a screener reply via the LLM, optionally extract
    the caller's name/topic as JSON (after 2+ caller turns), and TTS-stream
    the reply back to the caller.

    Args:
        caller_id: Queue identifier for the caller.
        pcm_data: Raw PCM chunk — assumed 16-bit mono, matching the websocket
            handler that buffers int16 samples before dispatching here.
        sample_rate: Sample rate of ``pcm_data`` in Hz.
    """
    state = caller_service.get_screening_state(caller_id)
    if not state or state["status"] == "complete":
        return
    # Skip if TTS is currently streaming to this caller — avoids transcribing
    # our own screener audio echoing back up the line.
    if caller_service.is_streaming_tts(caller_id):
        return
    # Transcribe caller speech; drop the chunk on failure or silence.
    try:
        text = await transcribe_audio(pcm_data, source_sample_rate=sample_rate)
    except Exception as e:
        print(f"[Screening] Transcription failed: {e}")
        return
    if not text or not text.strip():
        return
    print(f"[Screening] Caller {caller_id}: {text}")
    caller_service.update_screening(caller_id, caller_text=text)
    # Build conversation for LLM — ``state`` is live, so this already includes
    # the caller turn recorded just above.
    messages = []
    for msg in state["conversation"]:
        role = "assistant" if msg["role"] == "screener" else "user"
        messages.append({"role": role, "content": msg["content"]})
    # Generate screener response.
    try:
        response = await llm_service.generate(
            messages=messages,
            system_prompt=SCREENING_PROMPT
        )
    except Exception as e:
        print(f"[Screening] LLM failed: {e}")
        return
    if not response or not response.strip():
        return
    response = response.strip()
    print(f"[Screening] Screener → {caller_id}: {response}")
    caller_service.update_screening(caller_id, screener_text=response)
    # After 2+ caller responses, try to extract name and topic.
    if state["response_count"] >= 2:
        try:
            extract_prompt = f"""From this screening conversation, extract the caller's name and topic.
Conversation:
{chr(10).join(f'{m["role"]}: {m["content"]}' for m in state["conversation"])}
Respond with ONLY JSON: {{"name": "their first name or null", "topic": "brief topic or null"}}"""
            extract = await llm_service.generate(
                messages=[{"role": "user", "content": extract_prompt}],
                system_prompt="You extract structured data from conversations. Respond with only valid JSON."
            )
            # Grab the first {...} blob — LLMs sometimes wrap JSON in prose.
            json_match = re.search(r'\{[^}]+\}', extract)
            if json_match:
                info = json.loads(json_match.group())
                if info.get("name"):
                    caller_service.update_screening(caller_id, caller_name=info["name"])
                if info.get("topic"):
                    caller_service.update_screening(caller_id, topic=info["topic"])
                # Both fields known -> screening done; notify the frontend.
                if info.get("name") and info.get("topic"):
                    caller_service.end_screening(caller_id)
                    broadcast_event("screening_complete", {
                        "caller_id": caller_id,
                        "name": info["name"],
                        "topic": info["topic"]
                    })
        except Exception as e:
            # Extraction is opportunistic; keep screening on failure.
            print(f"[Screening] Extract failed: {e}")
    # TTS the screener response back to caller.
    try:
        audio_bytes = await generate_speech(response, "Sarah", "none")
        if audio_bytes:
            await caller_service.stream_audio_to_caller(caller_id, audio_bytes, 24000)
    except Exception as e:
        print(f"[Screening] Response TTS failed: {e}")
@app.websocket("/api/signalwire/stream")
async def signalwire_audio_stream(websocket: WebSocket):
"""Handle SignalWire bidirectional audio stream"""
@@ -2402,6 +2745,7 @@ async def signalwire_audio_stream(websocket: WebSocket):
caller_phone = "Unknown"
call_sid = ""
audio_buffer = bytearray()
screening_buffer = bytearray()
CHUNK_DURATION_S = 3
SAMPLE_RATE = 16000
chunk_samples = CHUNK_DURATION_S * SAMPLE_RATE
@@ -2448,6 +2792,9 @@ async def signalwire_audio_stream(websocket: WebSocket):
if stream_sid:
caller_service.register_stream_sid(caller_id, stream_sid)
# Start screening conversation
asyncio.create_task(_start_screening_greeting(caller_id))
elif event == "media" and stream_started:
try:
payload = msg.get("media", {}).get("payload", "")
@@ -2458,6 +2805,16 @@ async def signalwire_audio_stream(websocket: WebSocket):
call_info = caller_service.active_calls.get(caller_id)
if not call_info:
# Caller is queued, not on air — route to screening
screening_buffer.extend(pcm_data)
if len(screening_buffer) >= chunk_samples * 2:
pcm_chunk = bytes(screening_buffer[:chunk_samples * 2])
screening_buffer = screening_buffer[chunk_samples * 2:]
audio_check = np.frombuffer(pcm_chunk, dtype=np.int16).astype(np.float32) / 32768.0
if np.abs(audio_check).max() >= 0.01:
asyncio.create_task(
_handle_screening_audio(caller_id, pcm_chunk, SAMPLE_RATE)
)
continue
audio_buffer.extend(pcm_data)
@@ -2713,7 +3070,8 @@ async def _trigger_ai_auto_respond(accumulated_text: str):
conversation_summary = session.get_conversation_summary()
show_history = session.get_show_history()
system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history)
mood = detect_host_mood(session.conversation)
system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history, emotional_read=mood)
messages = _normalize_messages_for_llm(session.conversation[-10:])
response = await llm_service.generate(
@@ -2785,7 +3143,8 @@ async def ai_respond():
conversation_summary = session.get_conversation_summary()
show_history = session.get_show_history()
system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history)
mood = detect_host_mood(session.conversation)
system_prompt = get_caller_prompt(session.caller, conversation_summary, show_history, emotional_read=mood)
messages = _normalize_messages_for_llm(session.conversation[-10:])
response = await llm_service.generate(
@@ -2856,6 +3215,7 @@ async def hangup_real_caller():
caller_id = session.active_real_caller["caller_id"]
caller_phone = session.active_real_caller["phone"]
conversation_snapshot = list(session.conversation)
call_started = getattr(session, '_call_started_at', 0.0)
auto_followup_enabled = session.auto_followup
# End the phone call via SignalWire
@@ -2875,7 +3235,7 @@ async def hangup_real_caller():
threading.Thread(target=audio_service.play_sfx, args=(str(hangup_sound),), daemon=True).start()
asyncio.create_task(
_summarize_real_call(caller_phone, conversation_snapshot, auto_followup_enabled)
_summarize_real_call(caller_phone, conversation_snapshot, call_started, auto_followup_enabled)
)
return {
@@ -2884,8 +3244,9 @@ async def hangup_real_caller():
}
async def _summarize_real_call(caller_phone: str, conversation: list, auto_followup_enabled: bool):
async def _summarize_real_call(caller_phone: str, conversation: list, started_at: float, auto_followup_enabled: bool):
"""Background task: summarize call and store in history"""
ended_at = time.time()
summary = ""
if conversation:
transcript_text = "\n".join(
@@ -2901,6 +3262,8 @@ async def _summarize_real_call(caller_phone: str, conversation: list, auto_follo
caller_name=caller_phone,
summary=summary,
transcript=conversation,
started_at=started_at,
ended_at=ended_at,
))
print(f"[Real Caller] {caller_phone} call summarized: {summary[:80]}...")
@@ -2963,6 +3326,70 @@ async def set_auto_followup(data: dict):
return {"enabled": session.auto_followup}
# --- Transcript & Chapter Export ---
@app.get("/api/session/export")
async def export_session():
    """Export the session transcript with speaker labels and chapter markers.

    Chapters are derived from each call's start time relative to the earliest
    recorded call; transcript lines carry [mm:ss] offsets from that same base.

    Returns:
        dict with session_id, joined transcript text, chapter list
        ({startTime, title}), and call_count.

    Raises:
        HTTPException: 400 when the session has no recorded calls.
    """
    if not session.call_history:
        raise HTTPException(400, "No calls in this session to export")
    # Earliest positive call start anchors all offsets; fall back to "now"
    # when no record carries a usable start time (pre-timestamp records).
    session_start = min(
        (r.started_at for r in session.call_history if r.started_at > 0),
        default=time.time()
    )
    transcript_lines = []
    chapters = []
    for i, record in enumerate(session.call_history):
        # Chapter from call start time; clamp to 0 for untimestamped records.
        offset_seconds = max(0, record.started_at - session_start) if record.started_at > 0 else 0
        chapter_title = f"{record.caller_name}"
        if record.summary:
            # Use first sentence of summary for chapter title
            short_summary = record.summary.split(".")[0].strip()
            if short_summary:
                chapter_title += f" \u2014 {short_summary}"
        chapters.append({"startTime": round(offset_seconds), "title": chapter_title})
        # Separator between calls
        if i > 0:
            transcript_lines.append("")
        transcript_lines.append(f"--- Call {i + 1}: {record.caller_name} ---")
        transcript_lines.append("")
        # Transcript lines with timestamps; messages without their own
        # timestamp reuse the call's start offset.
        for msg in record.transcript:
            msg_offset = msg.get("timestamp", 0) - session_start if msg.get("timestamp") else offset_seconds
            if msg_offset < 0:
                msg_offset = 0
            mins = int(msg_offset // 60)
            secs = int(msg_offset % 60)
            role = msg.get("role", "")
            # Map internal roles to display speaker labels.
            if role in ("user", "host"):
                speaker = "HOST"
            elif role.startswith("real_caller:"):
                speaker = role.split(":", 1)[1].upper()
            elif role.startswith("ai_caller:"):
                speaker = role.split(":", 1)[1].upper()
            elif role == "assistant":
                # Plain assistant turns belong to the AI caller on record.
                speaker = record.caller_name.upper()
            else:
                speaker = role.upper()
            transcript_lines.append(f"[{mins:02d}:{secs:02d}] {speaker}: {msg['content']}")
    return {
        "session_id": session.id,
        "transcript": "\n".join(transcript_lines),
        "chapters": chapters,
        "call_count": len(session.call_history),
    }
# --- Server Control Endpoints ---
import subprocess

View File

@@ -53,6 +53,14 @@ class AudioService:
self._music_volume: float = 0.3
self._music_loop: bool = True
# Music crossfade state
self._crossfade_active: bool = False
self._crossfade_old_data: Optional[np.ndarray] = None
self._crossfade_old_position: int = 0
self._crossfade_progress: float = 0.0
self._crossfade_samples: int = 0
self._crossfade_step: float = 0.0
# Caller playback state
self._caller_stop_event = threading.Event()
self._caller_thread: Optional[threading.Thread] = None
@@ -578,6 +586,55 @@ class AudioService:
print(f"Failed to load music: {e}")
return False
def crossfade_to(self, file_path: str, duration: float = 3.0):
    """Crossfade from the current music track to a new one.

    Loads ``file_path``, resamples it to the output device rate if needed,
    swaps it in as the active track, and arms the crossfade state that the
    music stream callback consumes (mixing old/new with linear fades).
    If nothing is currently playing, falls back to a plain load + play.

    Args:
        file_path: Path to the new audio file (any format librosa can read).
        duration: Fade length in seconds; non-positive makes the switch
            effectively immediate (fade step jumps straight to 1.0).
    """
    import librosa
    # No current track -> nothing to fade from; just start the new one.
    if not self._music_playing or self._music_resampled is None:
        if self.load_music(file_path):
            self.play_music()
        return
    # Load the new track (decoded mono at the service's nominal output rate).
    path = Path(file_path)
    if not path.exists():
        print(f"Music file not found: {file_path}")
        return
    try:
        audio, sr = librosa.load(str(path), sr=self.output_sample_rate, mono=True)
        new_data = audio.astype(np.float32)
    except Exception as e:
        print(f"Failed to load music for crossfade: {e}")
        return
    # Get device sample rate for resampling — the callback plays at the
    # device's native rate, not necessarily the nominal one.
    if self.output_device is not None:
        device_info = sd.query_devices(self.output_device)
        device_sr = int(device_info['default_samplerate'])
    else:
        device_sr = self.output_sample_rate
    if self.output_sample_rate != device_sr:
        new_resampled = librosa.resample(new_data, orig_sr=self.output_sample_rate, target_sr=device_sr)
    else:
        new_resampled = new_data.copy()
    # Swap: current becomes old (keeps playing out), new becomes current.
    self._crossfade_old_data = self._music_resampled
    self._crossfade_old_position = self._music_position
    self._music_resampled = new_resampled
    self._music_data = new_data
    self._music_position = 0
    # Configure crossfade timing: progress advances by _crossfade_step per
    # sample in the stream callback until it reaches 1.0.
    self._crossfade_samples = int(device_sr * duration)
    self._crossfade_progress = 0.0
    self._crossfade_step = 1.0 / self._crossfade_samples if self._crossfade_samples > 0 else 1.0
    self._crossfade_active = True
    print(f"Crossfading to {path.name} over {duration}s")
def play_music(self):
"""Start music playback to specific channel"""
import librosa
@@ -625,25 +682,55 @@ class AudioService:
if not self._music_playing or self._music_resampled is None:
return
# Read new track samples
end_pos = self._music_position + frames
if end_pos <= len(self._music_resampled):
outdata[:, channel_idx] = self._music_resampled[self._music_position:end_pos] * self._music_volume
new_samples = self._music_resampled[self._music_position:end_pos].copy()
self._music_position = end_pos
else:
remaining = len(self._music_resampled) - self._music_position
new_samples = np.zeros(frames, dtype=np.float32)
if remaining > 0:
outdata[:remaining, channel_idx] = self._music_resampled[self._music_position:] * self._music_volume
new_samples[:remaining] = self._music_resampled[self._music_position:]
if self._music_loop:
self._music_position = 0
wrap_frames = frames - remaining
if wrap_frames > 0:
outdata[remaining:, channel_idx] = self._music_resampled[:wrap_frames] * self._music_volume
new_samples[remaining:] = self._music_resampled[:wrap_frames]
self._music_position = wrap_frames
else:
self._music_position = len(self._music_resampled)
if remaining <= 0:
self._music_playing = False
if self._crossfade_active and self._crossfade_old_data is not None:
# Read old track samples
old_end = self._crossfade_old_position + frames
if old_end <= len(self._crossfade_old_data):
old_samples = self._crossfade_old_data[self._crossfade_old_position:old_end]
self._crossfade_old_position = old_end
else:
old_remaining = len(self._crossfade_old_data) - self._crossfade_old_position
old_samples = np.zeros(frames, dtype=np.float32)
if old_remaining > 0:
old_samples[:old_remaining] = self._crossfade_old_data[self._crossfade_old_position:]
self._crossfade_old_position = len(self._crossfade_old_data)
# Compute fade curves for this chunk
start_progress = self._crossfade_progress
end_progress = min(1.0, start_progress + self._crossfade_step * frames)
fade_in = np.linspace(start_progress, end_progress, frames, dtype=np.float32)
fade_out = 1.0 - fade_in
outdata[:, channel_idx] = (old_samples * fade_out + new_samples * fade_in) * self._music_volume
self._crossfade_progress = end_progress
if self._crossfade_progress >= 1.0:
self._crossfade_active = False
self._crossfade_old_data = None
print("Crossfade complete")
else:
outdata[:, channel_idx] = new_samples * self._music_volume
try:
self._music_stream = sd.OutputStream(
device=device,
@@ -659,15 +746,48 @@ class AudioService:
print(f"Music playback error: {e}")
self._music_playing = False
def stop_music(self):
"""Stop music playback"""
def stop_music(self, fade_duration: float = 2.0):
    """Stop music playback, fading the volume out over ``fade_duration`` seconds.

    A non-positive ``fade_duration`` stops immediately. The fade runs on a
    daemon thread: it ramps ``_music_volume`` down linearly in 20 steps, then
    tears down the stream and restores the original volume so the next track
    starts at the configured level.

    Args:
        fade_duration: Fade-out length in seconds (default 2.0).
    """
    if not self._music_playing or not self._music_stream:
        # Nothing audible — just tear down any leftover stream state.
        self._music_playing = False
        if self._music_stream:
            self._music_stream.stop()
            self._music_stream.close()
            self._music_stream = None
        self._music_position = 0
        return

    if fade_duration <= 0:
        # Immediate hard stop, no fade thread.
        self._music_playing = False
        self._music_stream.stop()
        self._music_stream.close()
        self._music_stream = None
        self._music_position = 0
        print("Music stopped")
        return

    # Hoisted out of the fade loop — the original re-imported time on every
    # iteration of the ramp.
    import threading
    import time

    original_volume = self._music_volume
    steps = 20
    step_time = fade_duration / steps

    def _fade():
        # Linear volume ramp; bail early if something else stopped playback.
        for i in range(steps):
            if not self._music_playing:
                break
            self._music_volume = original_volume * (1 - (i + 1) / steps)
            time.sleep(step_time)
        self._music_playing = False
        if self._music_stream:
            self._music_stream.stop()
            self._music_stream.close()
            self._music_stream = None
        self._music_position = 0
        # Restore volume so the next play_music() starts at the configured level.
        self._music_volume = original_volume
        print("Music faded out and stopped")

    threading.Thread(target=_fade, daemon=True).start()
def play_ad(self, file_path: str):
"""Load and play an ad file once (no loop) on the ad channel"""

View File

@@ -25,6 +25,7 @@ class CallerService:
self._stream_sids: dict[str, str] = {} # caller_id -> SignalWire streamSid
self._send_locks: dict[str, asyncio.Lock] = {} # per-caller send lock
self._streaming_tts: set[str] = set() # caller_ids currently receiving TTS
self._screening_state: dict[str, dict] = {} # caller_id -> screening conversation
def _get_send_lock(self, caller_id: str) -> asyncio.Lock:
if caller_id not in self._send_locks:
@@ -51,18 +52,6 @@ class CallerService:
self._queue = [c for c in self._queue if c["caller_id"] != caller_id]
print(f"[Caller] {caller_id} removed from queue")
def get_queue(self) -> list[dict]:
now = time.time()
with self._lock:
return [
{
"caller_id": c["caller_id"],
"phone": c["phone"],
"wait_time": int(now - c["queued_at"]),
}
for c in self._queue
]
def allocate_channel(self) -> int:
with self._lock:
ch = self.FIRST_REAL_CHANNEL
@@ -111,6 +100,7 @@ class CallerService:
self._call_sids.pop(caller_id, None)
self._stream_sids.pop(caller_id, None)
self._send_locks.pop(caller_id, None)
self._screening_state.pop(caller_id, None)
def reset(self):
with self._lock:
@@ -125,8 +115,72 @@ class CallerService:
self._stream_sids.clear()
self._send_locks.clear()
self._streaming_tts.clear()
self._screening_state.clear()
print("[Caller] Service reset")
# --- Screening ---
def start_screening(self, caller_id: str):
    """Create a fresh screening record for a caller waiting in the queue."""
    fresh_state = {
        "conversation": [],
        "caller_name": None,
        "topic": None,
        "status": "screening",  # flips to "complete" via end_screening()
        "response_count": 0,
    }
    self._screening_state[caller_id] = fresh_state
    print(f"[Screening] Started for {caller_id}")
def get_screening_state(self, caller_id: str) -> Optional[dict]:
    """Look up the screening record for a caller; None if screening never started."""
    return self._screening_state.get(caller_id, None)
def update_screening(self, caller_id: str, caller_text: Optional[str] = None,
                     screener_text: Optional[str] = None, caller_name: Optional[str] = None,
                     topic: Optional[str] = None):
    """Append screening dialogue turns and record any extracted caller info.

    Args:
        caller_id: Queue caller whose screening state should be updated.
        caller_text: What the caller said; appended with role "caller".
        screener_text: What the AI screener said; appended with role "screener".
        caller_name: Extracted name, stored if provided.
        topic: Extracted call topic, stored if provided.

    Silently no-ops when the caller has no screening state (screening never
    started, or the caller was already cleaned up). Empty strings are treated
    the same as None and are not recorded.
    """
    state = self._screening_state.get(caller_id)
    if not state:
        return
    if caller_text:
        state["conversation"].append({"role": "caller", "content": caller_text})
        # Only caller utterances bump the counter used to gauge screening progress.
        state["response_count"] += 1
    if screener_text:
        state["conversation"].append({"role": "screener", "content": screener_text})
    if caller_name:
        state["caller_name"] = caller_name
    if topic:
        state["topic"] = topic
def end_screening(self, caller_id: str):
    """Flag a caller's screening as finished and log what was gathered."""
    screening = self._screening_state.get(caller_id)
    if not screening:
        return
    screening["status"] = "complete"
    print(f"[Screening] Complete for {caller_id}: name={screening.get('caller_name')}, topic={screening.get('topic')}")
def get_queue(self) -> list[dict]:
    """Return waiting callers, each enriched with any available screening info.

    Entries without screening state carry None for the screening fields.
    """
    snapshot_time = time.time()
    with self._lock:
        enriched = []
        for caller in self._queue:
            cid = caller["caller_id"]
            screening = self._screening_state.get(cid)
            if screening is not None:
                status = screening["status"]
                name = screening.get("caller_name")
                summary = screening.get("topic")
            else:
                status = name = summary = None
            enriched.append({
                "caller_id": cid,
                "phone": caller["phone"],
                "wait_time": int(snapshot_time - caller["queued_at"]),
                "screening_status": status,
                "caller_name": name,
                "screening_summary": summary,
            })
        return enriched
def register_websocket(self, caller_id: str, websocket):
    """Register a WebSocket for a caller"""
    # Last registration wins: re-registering the same caller_id replaces
    # the previously stored WebSocket object.
    self._websockets[caller_id] = websocket

View File

@@ -0,0 +1,95 @@
"""Returning caller persistence service"""
import json
import time
import uuid
from pathlib import Path
from typing import Optional
DATA_FILE = Path(__file__).parent.parent.parent / "data" / "regulars.json"
MAX_REGULARS = 12
class RegularCallerService:
    """Manages persistent 'regular' callers who return across sessions.

    Regulars are plain dicts persisted to DATA_FILE as JSON. The roster is
    capped at MAX_REGULARS; the least-recently-heard regular is retired to
    make room for new promotions. Load/save are best-effort: disk errors are
    logged and never raised to callers.
    """

    def __init__(self):
        self._regulars: list[dict] = []
        self._load()

    def _load(self):
        """Load the roster from disk; a missing or corrupt file yields an empty roster."""
        if DATA_FILE.exists():
            try:
                with open(DATA_FILE) as f:
                    data = json.load(f)
                self._regulars = data.get("regulars", [])
                print(f"[Regulars] Loaded {len(self._regulars)} regular callers")
            except Exception as e:
                print(f"[Regulars] Failed to load: {e}")
                self._regulars = []

    def _save(self):
        """Write the roster to disk, creating the data directory if needed."""
        try:
            DATA_FILE.parent.mkdir(parents=True, exist_ok=True)
            with open(DATA_FILE, "w") as f:
                json.dump({"regulars": self._regulars}, f, indent=2)
        except Exception as e:
            print(f"[Regulars] Failed to save: {e}")

    def get_regulars(self) -> list[dict]:
        """Return a shallow copy of the full roster."""
        return list(self._regulars)

    def get_returning_callers(self, count: int = 2) -> list[dict]:
        """Get up to `count` regulars for returning caller slots.

        Only regulars with at least one prior call are eligible; selection is
        uniform-random without replacement. Returns [] when nobody qualifies.
        """
        import random
        eligible = [r for r in self._regulars if r.get("call_history")]
        if not eligible:
            return []
        return random.sample(eligible, min(count, len(eligible)))

    def add_regular(self, name: str, gender: str, age: int, job: str,
                    location: str, personality_traits: list[str],
                    first_call_summary: str) -> dict:
        """Promote a first-time caller to regular, persist, and return the new record.

        If the roster is at (or over) MAX_REGULARS, the least-recently-heard
        regular(s) are retired until there is room.
        """
        # `while` (not `if`) so an over-cap roster self-heals even if
        # MAX_REGULARS was lowered between sessions.
        while len(self._regulars) >= MAX_REGULARS:
            # min() finds the stalest entry without re-sorting (and thereby
            # persistently reordering) the entire roster.
            retired = min(self._regulars, key=lambda r: r.get("last_call", 0))
            self._regulars.remove(retired)
            print(f"[Regulars] Retired {retired['name']} to make room")
        # One timestamp so created_at, last_call and the first history entry agree.
        now = time.time()
        regular = {
            "id": str(uuid.uuid4())[:8],
            "name": name,
            "gender": gender,
            "age": age,
            "job": job,
            "location": location,
            "personality_traits": personality_traits,
            "call_history": [
                {"summary": first_call_summary, "timestamp": now}
            ],
            "last_call": now,
            "created_at": now,
        }
        self._regulars.append(regular)
        self._save()
        print(f"[Regulars] Promoted {name} to regular (total: {len(self._regulars)})")
        return regular

    def update_after_call(self, regular_id: str, call_summary: str):
        """Append a call summary to a regular's history and persist; logs if the id is unknown."""
        for regular in self._regulars:
            if regular["id"] == regular_id:
                regular.setdefault("call_history", []).append(
                    {"summary": call_summary, "timestamp": time.time()}
                )
                regular["last_call"] = time.time()
                self._save()
                print(f"[Regulars] Updated {regular['name']} call history ({len(regular['call_history'])} calls)")
                return
        print(f"[Regulars] Regular {regular_id} not found for update")
regular_caller_service = RegularCallerService()

1
data/regulars.json Normal file
View File

@@ -0,0 +1 @@
{"regulars": []}

View File

@@ -0,0 +1,189 @@
# Real Callers + AI Follow-Up Design
## Overview
Add real phone callers to the AI Radio Show via Twilio, alongside existing AI callers. Real callers dial a phone number, wait in a hold queue, and get taken on air by the host. Three-way conversations between host, real caller, and AI caller are supported. AI follow-up callers automatically reference what real callers said.
## Requirements
- Real callers connect via Twilio phone number
- Full-duplex audio — host and caller talk simultaneously, talk over each other
- Each real caller gets their own dedicated audio channel for recording
- Three-way calls: host + real caller + AI caller all live at once
- AI caller can respond manually (host-triggered) or automatically (listens and decides when to jump in)
- AI follow-up callers reference real caller conversations via show history
- Auto follow-up mode: system picks an AI caller and connects them after a real call
- Simple hold queue — callers wait with hold music, host sees list and picks who goes on air
- Twilio webhooks exposed via Cloudflare tunnel
## Architecture
### Audio Routing (Loopback Channels)
```
Ch 1: Host mic (existing)
Ch 2: AI callers / TTS (existing)
Ch 3+: Real callers (dynamically assigned per call)
Ch N-1: Music (existing)
Ch N: SFX (existing)
```
### Call Flow — Real Caller
```
Caller dials Twilio number
→ Twilio POST /api/twilio/voice
→ TwiML response: greeting + enqueue with hold music
→ Caller waits in hold queue
→ Host sees caller in dashboard queue panel
→ Host clicks "Take Call"
→ POST /api/queue/take/{call_sid}
→ Twilio opens WebSocket to /api/twilio/stream
→ Bidirectional audio:
Caller audio → decode mulaw → dedicated Loopback channel
Host audio + AI TTS → encode mulaw → Twilio → caller hears both
→ Real-time Whisper transcription of caller audio
→ Host hangs up → call summarized → stored in show history
```
### Three-Way Call Flow
```
Host mic ──────→ Ch 1 (recording)
→ Twilio outbound (real caller hears you)
→ Whisper transcription (AI gets your words)
Real caller ──→ Ch 3+ (recording, dedicated channel)
→ Whisper transcription (AI gets their words)
→ Host headphones
AI TTS ───────→ Ch 2 (recording)
→ Twilio outbound (real caller hears AI)
→ Host headphones (already works)
```
Conversation history becomes three-party with role labels: `host`, `real_caller`, `ai_caller`.
### AI Auto-Respond Mode
When toggled on, after each real caller transcription chunk:
1. Lightweight LLM call ("should I respond?" — use fast model like Haiku)
2. If YES → full response generated → TTS → plays on AI channel + streams to Twilio
3. Cooldown (~10s) prevents rapid-fire
4. Host can override with mute button
### AI Follow-Up System
After a real caller hangs up:
1. Full transcript (host + real caller + any AI) summarized by LLM
2. Summary stored in `session.call_history`
3. Next AI caller's system prompt includes show history:
```
EARLIER IN THE SHOW:
- Dave (real caller) called about his wife leaving after 12 years.
He got emotional about his kids.
- Jasmine called about her boss hitting on her at work.
You can reference these if it feels natural. Don't force it.
```
**Host-triggered (default):** Click any AI caller as normal. They already have show context.
**Auto mode:** After real caller hangs up, system waits ~5-10s, picks a fitting AI caller via short LLM call, biases their background generation toward the topic, auto-connects.
## Backend Changes
### New Module: `backend/services/twilio_service.py`
Manages Twilio integration:
- WebSocket handler for Media Streams (decode/encode mulaw 8kHz ↔ PCM)
- Call queue state (waiting callers, SIDs, timestamps, assigned channels)
- Channel pool management (allocate/release Loopback channels for real callers)
- Outbound audio mixing (host + AI TTS → mulaw → Twilio)
- Methods: `take_call()`, `hangup_real_caller()`, `get_queue()`, `send_audio_to_caller()`
### New Endpoints
```python
# Twilio webhooks
POST /api/twilio/voice # Incoming call → TwiML (greet + enqueue)
POST /api/twilio/hold-music # Hold music TwiML for waiting callers
WS /api/twilio/stream # Media Streams WebSocket (bidirectional audio)
# Host controls
GET /api/queue # List waiting callers (number, wait time)
POST /api/queue/take/{call_sid} # Dequeue caller → start media stream
POST /api/queue/drop/{call_sid} # Drop caller from queue
# AI follow-up
POST /api/followup/generate # Summarize last real call, trigger AI follow-up
```
### Session Model Changes
```python
class CallRecord:
caller_type: str # "ai" or "real"
caller_name: str # "Tony" or "Caller #3"
summary: str # LLM-generated summary after hangup
transcript: list[dict] # Full conversation [{role, content}]
class Session:
# Existing fields...
call_history: list[CallRecord] # All calls this episode
active_real_caller: dict | None # {call_sid, phone, channel, name}
active_ai_caller: str | None # Caller key
ai_respond_mode: str # "manual" or "auto"
auto_followup: bool # Auto-generate AI follow-up after real calls
```
Three-party conversation history uses roles: `host`, `real_caller:{name}`, `ai_caller:{name}`.
### AI Caller Prompt Changes
`get_caller_prompt()` extended to include:
- Show history from `session.call_history`
- Current real caller context (if three-way call active)
- Instructions for referencing real callers naturally
## Frontend Changes
### New: Call Queue Panel
Between callers section and chat. Shows waiting real callers with phone number and wait time. "Take Call" and "Drop" buttons per caller. Polls `/api/queue` every few seconds.
### Modified: Active Call Indicator
Shows real caller and AI caller simultaneously when both active:
- Real caller: name, channel number, call duration, hang up button
- AI caller: name, Manual/Auto toggle, "Let [name] respond" button (manual mode)
- Auto Follow-Up checkbox
### Modified: Chat Log
Three-party with visual distinction:
- Host messages: existing style
- Real caller: labeled "Dave (caller)", distinct color
- AI caller: labeled "Tony (AI)", distinct color
### Modified: Caller Grid
When real caller is active, clicking an AI caller adds them as third party instead of starting fresh call. Indicator shows which AI callers have been on the show this session.
## Dependencies
- `twilio` Python package (for TwiML generation, REST API)
- Twilio account with phone number (~$1.15/mo + per-minute)
- Cloudflare tunnel for exposing webhook endpoints
- `audioop` or equivalent for mulaw encode/decode (in the stdlib through Python 3.12, but deprecated since 3.11 and removed in 3.13 — plan on a maintained replacement package or a small pure-Python mulaw codec)
## Configuration
New env vars in `.env`:
```
TWILIO_ACCOUNT_SID=...
TWILIO_AUTH_TOKEN=...
TWILIO_PHONE_NUMBER=+1...
TWILIO_WEBHOOK_BASE_URL=https://your-tunnel.cloudflare.com
```

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,52 @@
# SignalWire Phone Call-In Design
## Goal
Replace browser-based WebSocket call-in with real phone calls via SignalWire. Callers dial 208-439-5853 and enter the show queue.
## Architecture
SignalWire handles PSTN connectivity. When a call comes in, SignalWire hits our webhook, we return XML telling it to open a bidirectional WebSocket stream with L16@16kHz audio. The audio flows through our existing pipeline — same queue, channel allocation, transcription, host mic streaming, and TTS streaming.
## Call Flow
1. Caller dials 208-439-5853
2. SignalWire hits `POST /api/signalwire/voice` (via Cloudflare tunnel)
3. We return `<Connect><Stream codec="L16@16000h">` XML
4. SignalWire opens WebSocket to `/api/signalwire/stream`
5. Caller enters queue — host sees phone number on dashboard
6. Host takes call — audio flows bidirectionally
7. Host hangs up — we call SignalWire REST API to end the phone call
## Audio Path
```
Phone → PSTN → SignalWire → WebSocket (base64 L16 JSON) → Our server
Our server → WebSocket (base64 L16 JSON) → SignalWire → PSTN → Phone
```
## SignalWire WebSocket Protocol
Incoming: `{"event": "media", "media": {"payload": "<base64 L16 PCM 16kHz>"}}`
Outgoing: `{"event": "media", "media": {"payload": "<base64 L16 PCM 16kHz>"}}`
Start: `{"event": "start", "start": {"streamSid": "...", "callSid": "..."}}`
Stop: `{"event": "stop"}`
## What Changes
- Remove: browser call-in page, browser WebSocket handler
- Add: SignalWire webhook + WebSocket handler, hangup via REST API
- Modify: CallerService (name→phone, base64 JSON encoding for send), dashboard (show phone number)
- Unchanged: AudioService, queue logic, transcription, TTS streaming, three-way calls
## Config
```
SIGNALWIRE_PROJECT_ID=<project-id — set in .env only; never commit real values>
SIGNALWIRE_SPACE=<your-space>.signalwire.com
SIGNALWIRE_TOKEN=<api-token — treat as a secret; rotate it if it was ever committed>
SIGNALWIRE_PHONE=+12084395853
```
Webhook URL: `https://radioshow.macneilmediagroup.com/api/signalwire/voice`
No SDK needed — httpx for the one REST call (hangup).

View File

@@ -0,0 +1,855 @@
# SignalWire Phone Call-In Implementation Plan
> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
**Goal:** Replace browser-based WebSocket call-in with real phone calls via SignalWire (208-439-5853).
**Architecture:** SignalWire hits our webhook on inbound calls, we return XML to open a bidirectional WebSocket stream with L16@16kHz audio. The existing queue, channel allocation, transcription, host mic streaming, and TTS streaming are reused — only the WebSocket message format changes (base64 JSON instead of raw binary).
**Tech Stack:** Python/FastAPI, SignalWire Compatibility API (LaML XML + WebSocket), httpx for REST calls, existing audio pipeline.
---
## Task 1: Add SignalWire Config
**Files:**
- Modify: `backend/config.py`
- Modify: `.env`
**Step 1: Add SignalWire settings to config.py**
In `backend/config.py`, add these fields to the `Settings` class after the existing API keys block (after line 16):
```python
# SignalWire
signalwire_project_id: str = os.getenv("SIGNALWIRE_PROJECT_ID", "")
signalwire_space: str = os.getenv("SIGNALWIRE_SPACE", "")
signalwire_token: str = os.getenv("SIGNALWIRE_TOKEN", "")
signalwire_phone: str = os.getenv("SIGNALWIRE_PHONE", "")
```
**Step 2: Add SignalWire vars to .env**
Append to `.env`:
```
# SignalWire
SIGNALWIRE_PROJECT_ID=<project-id>
SIGNALWIRE_SPACE=<your-space>.signalwire.com
SIGNALWIRE_TOKEN=<api-token — keep out of version control; rotate it if it was ever committed>
SIGNALWIRE_PHONE=+12084395853
```
**Step 3: Verify config loads**
```bash
cd /Users/lukemacneil/ai-podcast && python -c "from backend.config import settings; print(settings.signalwire_space)"
```
Expected: the `SIGNALWIRE_SPACE` value from your `.env` (e.g. `your-space.signalwire.com`)
**Step 4: Commit**
```bash
git add backend/config.py .env
git commit -m "Add SignalWire configuration"
```
---
## Task 2: Update CallerService for SignalWire Protocol
**Files:**
- Modify: `backend/services/caller_service.py`
The CallerService currently sends raw binary PCM frames. SignalWire needs base64-encoded L16 PCM wrapped in JSON. Also swap `name` field to `phone` since callers now have phone numbers.
**Step 1: Update queue to use `phone` instead of `name`**
In `caller_service.py`, make these changes:
1. Update docstring (line 1): `"""Phone caller queue and audio stream service"""`
2. In `add_to_queue` (line 24): Change parameter `name` to `phone`, and update the dict:
```python
def add_to_queue(self, caller_id: str, phone: str):
with self._lock:
self._queue.append({
"caller_id": caller_id,
"phone": phone,
"queued_at": time.time(),
})
print(f"[Caller] {phone} added to queue (ID: {caller_id})")
```
3. In `get_queue` (line 38): Return `phone` instead of `name`:
```python
def get_queue(self) -> list[dict]:
now = time.time()
with self._lock:
return [
{
"caller_id": c["caller_id"],
"phone": c["phone"],
"wait_time": int(now - c["queued_at"]),
}
for c in self._queue
]
```
4. In `take_call` (line 62): Use `phone` instead of `name`:
```python
def take_call(self, caller_id: str) -> dict:
caller = None
with self._lock:
for c in self._queue:
if c["caller_id"] == caller_id:
caller = c
break
if caller:
self._queue = [c for c in self._queue if c["caller_id"] != caller_id]
if not caller:
raise ValueError(f"Caller {caller_id} not in queue")
channel = self.allocate_channel()
self._caller_counter += 1
phone = caller["phone"]
call_info = {
"caller_id": caller_id,
"phone": phone,
"channel": channel,
"started_at": time.time(),
}
self.active_calls[caller_id] = call_info
print(f"[Caller] {phone} taken on air — channel {channel}")
return call_info
```
5. In `hangup` (line 89): Use `phone` instead of `name`:
```python
def hangup(self, caller_id: str):
call_info = self.active_calls.pop(caller_id, None)
if call_info:
self.release_channel(call_info["channel"])
print(f"[Caller] {call_info['phone']} hung up — channel {call_info['channel']} released")
self._websockets.pop(caller_id, None)
```
**Step 2: Update `send_audio_to_caller` for SignalWire JSON format**
Replace the existing `send_audio_to_caller` method with:
```python
async def send_audio_to_caller(self, caller_id: str, pcm_data: bytes, sample_rate: int):
"""Send small audio chunk to caller via SignalWire WebSocket.
Encodes L16 PCM as base64 JSON per SignalWire protocol.
"""
ws = self._websockets.get(caller_id)
if not ws:
return
try:
import base64
if sample_rate != 16000:
audio = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0
ratio = 16000 / sample_rate
out_len = int(len(audio) * ratio)
indices = (np.arange(out_len) / ratio).astype(int)
indices = np.clip(indices, 0, len(audio) - 1)
audio = audio[indices]
pcm_data = (audio * 32767).astype(np.int16).tobytes()
payload = base64.b64encode(pcm_data).decode('ascii')
import json
await ws.send_text(json.dumps({
"event": "media",
"media": {"payload": payload}
}))
except Exception as e:
print(f"[Caller] Failed to send audio: {e}")
```
**Step 3: Update `stream_audio_to_caller` for SignalWire JSON format**
Replace the existing `stream_audio_to_caller` method with:
```python
async def stream_audio_to_caller(self, caller_id: str, pcm_data: bytes, sample_rate: int):
"""Stream large audio (TTS) to caller in real-time chunks via SignalWire WebSocket."""
ws = self._websockets.get(caller_id)
if not ws:
return
self.streaming_tts = True
try:
import base64
import json
audio = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0
if sample_rate != 16000:
ratio = 16000 / sample_rate
out_len = int(len(audio) * ratio)
indices = (np.arange(out_len) / ratio).astype(int)
indices = np.clip(indices, 0, len(audio) - 1)
audio = audio[indices]
chunk_samples = 960
for i in range(0, len(audio), chunk_samples):
if caller_id not in self._websockets:
break
chunk = audio[i:i + chunk_samples]
pcm_chunk = (chunk * 32767).astype(np.int16).tobytes()
payload = base64.b64encode(pcm_chunk).decode('ascii')
await ws.send_text(json.dumps({
"event": "media",
"media": {"payload": payload}
}))
await asyncio.sleep(0.055)
except Exception as e:
print(f"[Caller] Failed to stream audio: {e}")
finally:
self.streaming_tts = False
```
**Step 4: Remove `notify_caller` and `disconnect_caller` methods**
These sent browser-specific JSON control messages. SignalWire callers are disconnected via REST API (handled in main.py). Delete methods `notify_caller` (line 168) and `disconnect_caller` (line 175). They will be replaced with a REST-based hangup in Task 4.
**Step 5: Add `call_sid` tracking for SignalWire call hangup**
Add a dict to track SignalWire call SIDs so we can end calls via REST:
In `__init__`, after `self._websockets` line, add:
```python
self._call_sids: dict[str, str] = {} # caller_id -> SignalWire callSid
```
Add methods:
```python
def register_call_sid(self, caller_id: str, call_sid: str):
"""Track SignalWire callSid for a caller"""
self._call_sids[caller_id] = call_sid
def get_call_sid(self, caller_id: str) -> str | None:
"""Get SignalWire callSid for a caller"""
return self._call_sids.get(caller_id)
def unregister_call_sid(self, caller_id: str):
"""Remove callSid tracking"""
self._call_sids.pop(caller_id, None)
```
In `reset`, also clear `self._call_sids`:
```python
self._call_sids.clear()
```
In `hangup`, also clean up call_sid:
```python
self._call_sids.pop(caller_id, None)
```
**Step 6: Run existing tests**
```bash
cd /Users/lukemacneil/ai-podcast && python -m pytest tests/test_caller_service.py -v
```
Tests will likely need updates due to `name``phone` rename. Fix any failures.
**Step 7: Commit**
```bash
git add backend/services/caller_service.py
git commit -m "Update CallerService for SignalWire protocol"
```
---
## Task 3: Add SignalWire Voice Webhook
**Files:**
- Modify: `backend/main.py`
**Step 1: Add the voice webhook endpoint**
Add after the existing route definitions (after line 421), replacing the `/call-in` route:
```python
# --- SignalWire Endpoints ---
from fastapi import Request, Response
@app.post("/api/signalwire/voice")
async def signalwire_voice_webhook(request: Request):
"""Handle inbound call from SignalWire — return XML to start bidirectional stream"""
form = await request.form()
caller_phone = form.get("From", "Unknown")
call_sid = form.get("CallSid", "")
print(f"[SignalWire] Inbound call from {caller_phone} (CallSid: {call_sid})")
# Build WebSocket URL from the request
ws_scheme = "wss"
host = request.headers.get("host", "radioshow.macneilmediagroup.com")
stream_url = f"{ws_scheme}://{host}/api/signalwire/stream"
xml = f"""<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Connect>
<Stream url="{stream_url}" codec="L16@16000h">
<Parameter name="caller_phone" value="{caller_phone}"/>
<Parameter name="call_sid" value="{call_sid}"/>
</Stream>
</Connect>
</Response>"""
return Response(content=xml, media_type="application/xml")
```
**Step 2: Remove the `/call-in` route**
Delete these lines (around line 419-421):
```python
@app.get("/call-in")
async def call_in_page():
return FileResponse(frontend_dir / "call-in.html")
```
**Step 3: Verify server starts**
```bash
cd /Users/lukemacneil/ai-podcast && python -c "from backend.main import app; print('OK')"
```
**Step 4: Commit**
```bash
git add backend/main.py
git commit -m "Add SignalWire voice webhook, remove call-in route"
```
---
## Task 4: Add SignalWire WebSocket Stream Handler
**Files:**
- Modify: `backend/main.py`
This replaces the browser caller WebSocket handler at `/api/caller/stream`.
**Step 1: Replace the browser WebSocket handler**
Delete the entire `caller_audio_stream` function (the `@app.websocket("/api/caller/stream")` handler, lines 807-887).
Add the new SignalWire WebSocket handler:
```python
@app.websocket("/api/signalwire/stream")
async def signalwire_audio_stream(websocket: WebSocket):
"""Handle SignalWire bidirectional audio stream"""
await websocket.accept()
caller_id = str(uuid.uuid4())[:8]
caller_phone = "Unknown"
call_sid = ""
audio_buffer = bytearray()
CHUNK_DURATION_S = 3
SAMPLE_RATE = 16000
chunk_samples = CHUNK_DURATION_S * SAMPLE_RATE
stream_started = False
try:
while True:
raw = await websocket.receive_text()
msg = json.loads(raw)
event = msg.get("event")
if event == "start":
# Extract caller info from stream parameters
# customParameters arrives as a dict of name -> value
custom = msg.get("start", {}).get("customParameters", {})
caller_phone = custom.get("caller_phone", "Unknown")
call_sid = custom.get("call_sid", "")
stream_started = True
print(f"[SignalWire WS] Stream started: {caller_phone} (CallSid: {call_sid})")
# Add to queue and register
caller_service.add_to_queue(caller_id, caller_phone)
caller_service.register_websocket(caller_id, websocket)
if call_sid:
caller_service.register_call_sid(caller_id, call_sid)
elif event == "media" and stream_started:
# Decode base64 L16 PCM audio
import base64
payload = msg.get("media", {}).get("payload", "")
if not payload:
continue
pcm_data = base64.b64decode(payload)
# Only process audio if caller is on air
call_info = caller_service.active_calls.get(caller_id)
if not call_info:
continue
audio_buffer.extend(pcm_data)
# Route to configured live caller Loopback channel
audio_service.route_real_caller_audio(pcm_data, SAMPLE_RATE)
# Transcribe when we have enough audio
if len(audio_buffer) >= chunk_samples * 2:
pcm_chunk = bytes(audio_buffer[:chunk_samples * 2])
audio_buffer = audio_buffer[chunk_samples * 2:]
asyncio.create_task(
_handle_real_caller_transcription(caller_id, pcm_chunk, SAMPLE_RATE)
)
elif event == "stop":
print(f"[SignalWire WS] Stream stopped: {caller_phone}")
break
except WebSocketDisconnect:
print(f"[SignalWire WS] Disconnected: {caller_id} ({caller_phone})")
except Exception as e:
print(f"[SignalWire WS] Error: {e}")
finally:
caller_service.unregister_websocket(caller_id)
caller_service.unregister_call_sid(caller_id)
caller_service.remove_from_queue(caller_id)
if caller_id in caller_service.active_calls:
caller_service.hangup(caller_id)
if session.active_real_caller and session.active_real_caller.get("caller_id") == caller_id:
session.active_real_caller = None
if len(caller_service.active_calls) == 0:
audio_service.stop_host_stream()
if audio_buffer:
asyncio.create_task(
_handle_real_caller_transcription(caller_id, bytes(audio_buffer), SAMPLE_RATE)
)
```
**Step 2: Commit**
```bash
git add backend/main.py
git commit -m "Add SignalWire WebSocket stream handler, remove browser handler"
```
---
## Task 5: Update Hangup and Queue Endpoints for SignalWire
**Files:**
- Modify: `backend/main.py`
When the host hangs up or drops a caller, we need to end the actual phone call via SignalWire's REST API.
**Step 1: Add SignalWire hangup helper**
Add this function near the top of `main.py` (after imports):
```python
async def _signalwire_end_call(call_sid: str):
"""End a phone call via SignalWire REST API"""
if not call_sid or not settings.signalwire_space:
return
try:
url = f"https://{settings.signalwire_space}/api/laml/2010-04-01/Accounts/{settings.signalwire_project_id}/Calls/{call_sid}"
async with httpx.AsyncClient(timeout=10.0) as client:
response = await client.post(
url,
data={"Status": "completed"},
auth=(settings.signalwire_project_id, settings.signalwire_token),
)
print(f"[SignalWire] End call {call_sid}: {response.status_code}")
except Exception as e:
print(f"[SignalWire] Failed to end call {call_sid}: {e}")
```
Also add `import httpx` at the top of main.py if not already present.
**Step 2: Update `take_call_from_queue`**
In the `take_call_from_queue` endpoint, update `name` references to `phone`:
```python
@app.post("/api/queue/take/{caller_id}")
async def take_call_from_queue(caller_id: str):
"""Take a caller off hold and put them on air"""
try:
call_info = caller_service.take_call(caller_id)
except ValueError as e:
raise HTTPException(404, str(e))
session.active_real_caller = {
"caller_id": call_info["caller_id"],
"channel": call_info["channel"],
"phone": call_info["phone"],
}
# Start host mic streaming if this is the first real caller
if len(caller_service.active_calls) == 1:
_start_host_audio_sender()
audio_service.start_host_stream(_host_audio_sync_callback)
return {
"status": "on_air",
"caller": call_info,
}
```
Note: The `notify_caller` call is removed — SignalWire callers don't need a JSON status message, they're already connected via the phone.
**Step 3: Update `drop_from_queue`**
End the phone call when dropping:
```python
@app.post("/api/queue/drop/{caller_id}")
async def drop_from_queue(caller_id: str):
"""Drop a caller from the queue"""
call_sid = caller_service.get_call_sid(caller_id)
caller_service.remove_from_queue(caller_id)
if call_sid:
await _signalwire_end_call(call_sid)
return {"status": "dropped"}
```
**Step 4: Update `hangup_real_caller`**
End the phone call when hanging up:
```python
@app.post("/api/hangup/real")
async def hangup_real_caller():
"""Hang up on real caller — disconnect immediately, summarize in background"""
if not session.active_real_caller:
raise HTTPException(400, "No active real caller")
caller_id = session.active_real_caller["caller_id"]
caller_phone = session.active_real_caller["phone"]
conversation_snapshot = list(session.conversation)
auto_followup_enabled = session.auto_followup
# End the phone call via SignalWire
call_sid = caller_service.get_call_sid(caller_id)
caller_service.hangup(caller_id)
if call_sid:
asyncio.create_task(_signalwire_end_call(call_sid))
# Stop host streaming if no more active callers
if len(caller_service.active_calls) == 0:
audio_service.stop_host_stream()
session.active_real_caller = None
# Play hangup sound in background
import threading
hangup_sound = settings.sounds_dir / "hangup.wav"
if hangup_sound.exists():
threading.Thread(target=audio_service.play_sfx, args=(str(hangup_sound),), daemon=True).start()
# Summarize and store history in background
asyncio.create_task(
_summarize_real_call(caller_phone, conversation_snapshot, auto_followup_enabled)
)
return {
"status": "disconnected",
"caller": caller_phone,
}
```
**Step 5: Update `_handle_real_caller_transcription`**
Change `caller_name` to `caller_phone`:
```python
async def _handle_real_caller_transcription(caller_id: str, pcm_data: bytes, sample_rate: int):
"""Transcribe a chunk of real caller audio and add to conversation"""
call_info = caller_service.active_calls.get(caller_id)
if not call_info:
return
text = await transcribe_audio(pcm_data, source_sample_rate=sample_rate)
if not text or not text.strip():
return
caller_phone = call_info["phone"]
print(f"[Real Caller] {caller_phone}: {text}")
session.add_message(f"real_caller:{caller_phone}", text)
if session.ai_respond_mode == "auto" and session.current_caller_key:
asyncio.create_task(_check_ai_auto_respond(text, caller_phone))
```
**Step 6: Update `_summarize_real_call`**
Change `caller_name` parameter to `caller_phone`:
```python
async def _summarize_real_call(caller_phone: str, conversation: list, auto_followup_enabled: bool):
"""Background task: summarize call and store in history"""
summary = ""
if conversation:
transcript_text = "\n".join(
f"{msg['role']}: {msg['content']}" for msg in conversation
)
summary = await llm_service.generate(
messages=[{"role": "user", "content": f"Summarize this radio show call in 1-2 sentences:\n{transcript_text}"}],
system_prompt="You summarize radio show conversations concisely. Focus on what the caller talked about and any emotional moments.",
)
session.call_history.append(CallRecord(
caller_type="real",
caller_name=caller_phone,
summary=summary,
transcript=conversation,
))
print(f"[Real Caller] {caller_phone} call summarized: {summary[:80]}...")
if auto_followup_enabled:
await _auto_followup(summary)
```
**Step 7: Update `_check_ai_auto_respond`**
Change parameter name from `real_caller_name` to `real_caller_phone`:
```python
async def _check_ai_auto_respond(real_caller_text: str, real_caller_phone: str):
```
(The body doesn't use the name/phone parameter in any way that needs changing.)
**Step 8: Update TTS streaming references**
In `text_to_speech` endpoint and `_check_ai_auto_respond`, the `session.active_real_caller` dict now uses `phone` instead of `name`. No code change needed for the TTS streaming since it only uses `caller_id`.
**Step 9: Verify server starts**
```bash
cd /Users/lukemacneil/ai-podcast && python -c "from backend.main import app; print('OK')"
```
**Step 10: Commit**
```bash
git add backend/main.py
git commit -m "Update hangup and queue endpoints for SignalWire REST API"
```
---
## Task 6: Update Frontend for Phone Callers
**Files:**
- Modify: `frontend/js/app.js`
- Modify: `frontend/index.html`
**Step 1: Update queue rendering in app.js**
In `renderQueue` function (around line 875), change `caller.name` to `caller.phone`:
```javascript
el.innerHTML = queue.map(caller => {
const mins = Math.floor(caller.wait_time / 60);
const secs = caller.wait_time % 60;
const waitStr = mins > 0 ? `${mins}m ${secs}s` : `${secs}s`;
return `
<div class="queue-item">
<span class="queue-name">${caller.phone}</span>
<span class="queue-wait">waiting ${waitStr}</span>
<button class="queue-take-btn" onclick="takeCall('${caller.caller_id}')">Take Call</button>
<button class="queue-drop-btn" onclick="dropCall('${caller.caller_id}')">Drop</button>
</div>
`;
}).join('');
```
**Step 2: Update `takeCall` log message**
In `takeCall` function (around line 896), change `data.caller.name` to `data.caller.phone`:
```javascript
if (data.status === 'on_air') {
showRealCaller(data.caller);
log(`${data.caller.phone} is on air — Channel ${data.caller.channel}`);
}
```
**Step 3: Update `showRealCaller` to use phone**
In `showRealCaller` function (around line 939):
```javascript
function showRealCaller(callerInfo) {
const nameEl = document.getElementById('real-caller-name');
const chEl = document.getElementById('real-caller-channel');
if (nameEl) nameEl.textContent = callerInfo.phone;
if (chEl) chEl.textContent = `Ch ${callerInfo.channel}`;
```
**Step 4: Update index.html queue section header**
In `frontend/index.html`, change the queue section header (line 56) — remove the call-in page link:
```html
<section class="queue-section">
<h2>Incoming Calls</h2>
<div id="call-queue" class="call-queue">
```
**Step 5: Bump cache version in index.html**
Find the app.js script tag and bump the version:
```html
<script src="/js/app.js?v=13"></script>
```
**Step 6: Commit**
```bash
git add frontend/js/app.js frontend/index.html
git commit -m "Update frontend for phone caller display"
```
---
## Task 7: Remove Browser Call-In Files
**Files:**
- Delete: `frontend/call-in.html`
- Delete: `frontend/js/call-in.js`
**Step 1: Delete files**
```bash
cd /Users/lukemacneil/ai-podcast && rm frontend/call-in.html frontend/js/call-in.js
```
**Step 2: Commit**
```bash
git add frontend/call-in.html frontend/js/call-in.js
git commit -m "Remove browser call-in page"
```
---
## Task 8: Update Tests
**Files:**
- Modify: `tests/test_caller_service.py`
**Step 1: Update tests for `name` → `phone` rename**
Throughout `test_caller_service.py`, change:
- `add_to_queue(caller_id, "TestName")` → `add_to_queue(caller_id, "+15551234567")`
- `caller["name"]` → `caller["phone"]`
- `call_info["name"]` → `call_info["phone"]`
Also remove any tests for `notify_caller` or `disconnect_caller` if they exist, since those methods were removed.
**Step 2: Run all tests**
```bash
cd /Users/lukemacneil/ai-podcast && python -m pytest tests/ -v
```
Expected: All pass.
**Step 3: Commit**
```bash
git add tests/
git commit -m "Update tests for SignalWire phone caller format"
```
---
## Task 9: Configure SignalWire Webhook and End-to-End Test
**Step 1: Start the server**
```bash
cd /Users/lukemacneil/ai-podcast && python -m uvicorn backend.main:app --reload --host 0.0.0.0 --port 8000
```
**Step 2: Verify webhook endpoint responds**
```bash
curl -X POST http://localhost:8000/api/signalwire/voice \
-d "From=+15551234567&CallSid=test123" \
-H "Content-Type: application/x-www-form-urlencoded"
```
Expected: XML response with `<Connect><Stream>` containing the WebSocket URL.
**Step 3: Verify Cloudflare tunnel is running**
```bash
curl -s https://radioshow.macneilmediagroup.com/api/server/status
```
Expected: JSON response with `"status": "running"`.
**Step 4: Configure SignalWire webhook**
In the SignalWire dashboard:
1. Go to Phone Numbers → 208-439-5853
2. Set "When a call comes in" to: `https://radioshow.macneilmediagroup.com/api/signalwire/voice`
3. Method: POST
4. Handler type: LaML Webhooks
**Step 5: Test with a real call**
Call 208-439-5853 from a phone. Expected:
1. Call connects (no ringing/hold — goes straight to stream)
2. Caller appears in queue on host dashboard with phone number
3. Host clicks "Take Call" → audio flows bidirectionally
4. Host clicks "Hang Up" → phone call ends
**Step 6: Commit any fixes needed**
```bash
git add -A
git commit -m "Final SignalWire integration fixes"
```
---
## Summary
| Task | What | Key Files |
|------|------|-----------|
| 1 | SignalWire config | `config.py`, `.env` |
| 2 | CallerService protocol update | `caller_service.py` |
| 3 | Voice webhook endpoint | `main.py` |
| 4 | WebSocket stream handler | `main.py` |
| 5 | Hangup/queue via REST API | `main.py` |
| 6 | Frontend phone display | `app.js`, `index.html` |
| 7 | Remove browser call-in | `call-in.html`, `call-in.js` |
| 8 | Update tests | `tests/` |
| 9 | Configure & test | SignalWire dashboard |
Tasks 1-5 are sequential backend work. Tasks 6-7 are frontend and can be done in parallel once Task 5 is complete. Task 8 can start any time after Task 2. Task 9 is the final integration test.

View File

@@ -593,6 +593,23 @@ section h2 {
.hangup-btn.small { font-size: 0.75rem; padding: 0.2rem 0.5rem; }
.auto-followup-label { display: flex; align-items: center; gap: 0.4rem; font-size: 0.8rem; color: #999; margin-top: 0.5rem; }
/* Returning Caller */
.caller-btn.returning {
border-color: #f9a825;
color: #f9a825;
}
.caller-btn.returning:hover {
border-color: #fdd835;
}
/* Screening Badges */
.screening-badge { font-size: 0.7rem; padding: 0.1rem 0.4rem; border-radius: 3px; font-weight: bold; }
.screening-badge.screening { background: #e65100; color: white; animation: pulse 1.5s infinite; }
.screening-badge.screened { background: #2e7d32; color: white; }
.screening-summary { font-size: 0.8rem; color: #aaa; font-style: italic; flex-basis: 100%; margin-top: 0.2rem; }
.queue-item { flex-wrap: wrap; }
/* Three-Party Chat */
.message.real-caller { border-left: 3px solid #c62828; padding-left: 0.5rem; }
.message.ai-caller { border-left: 3px solid #1565c0; padding-left: 0.5rem; }

View File

@@ -13,6 +13,7 @@
<div class="header-buttons">
<button id="on-air-btn" class="on-air-btn off">OFF AIR</button>
<button id="new-session-btn" class="new-session-btn">New Session</button>
<button id="export-session-btn">Export</button>
<button id="settings-btn">Settings</button>
</div>
</header>

View File

@@ -85,6 +85,9 @@ function initEventListeners() {
});
}
// Export session
document.getElementById('export-session-btn')?.addEventListener('click', exportSession);
// Server controls
document.getElementById('restart-server-btn')?.addEventListener('click', restartServer);
document.getElementById('stop-server-btn')?.addEventListener('click', stopServer);
@@ -351,7 +354,8 @@ async function loadCallers() {
data.callers.forEach(caller => {
const btn = document.createElement('button');
btn.className = 'caller-btn';
btn.textContent = caller.name;
if (caller.returning) btn.classList.add('returning');
btn.textContent = caller.returning ? `\u2605 ${caller.name}` : caller.name;
btn.dataset.key = caller.key;
btn.addEventListener('click', () => startCall(caller.key, caller.name));
grid.appendChild(btn);
@@ -996,10 +1000,21 @@ function renderQueue(queue) {
const mins = Math.floor(caller.wait_time / 60);
const secs = caller.wait_time % 60;
const waitStr = mins > 0 ? `${mins}m ${secs}s` : `${secs}s`;
const displayName = caller.caller_name || caller.phone;
const screenBadge = caller.screening_status === 'complete'
? '<span class="screening-badge screened">Screened</span>'
: caller.screening_status === 'screening'
? '<span class="screening-badge screening">Screening...</span>'
: '';
const summary = caller.screening_summary
? `<div class="screening-summary">${caller.screening_summary}</div>`
: '';
return `
<div class="queue-item">
<span class="queue-name">${caller.phone}</span>
<span class="queue-name">${displayName}</span>
${screenBadge}
<span class="queue-wait">waiting ${waitStr}</span>
${summary}
<button class="queue-take-btn" onclick="takeCall('${caller.caller_id}')">Take Call</button>
<button class="queue-drop-btn" onclick="dropCall('${caller.caller_id}')">Drop</button>
</div>
@@ -1155,6 +1170,23 @@ async function fetchConversationUpdates() {
}
async function exportSession() {
try {
const res = await safeFetch('/api/session/export');
const blob = new Blob([JSON.stringify(res, null, 2)], { type: 'application/json' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `session-${res.session_id}.json`;
a.click();
URL.revokeObjectURL(url);
log(`Exported session: ${res.call_count} calls`);
} catch (err) {
log('Export error: ' + err.message);
}
}
async function stopServer() {
if (!confirm('Stop the server? You will need to restart it manually.')) return;

View File

@@ -344,6 +344,7 @@ def main():
parser.add_argument("--dry-run", "-d", action="store_true", help="Generate metadata but don't publish")
parser.add_argument("--title", "-t", help="Override generated title")
parser.add_argument("--description", help="Override generated description")
parser.add_argument("--session-data", "-s", help="Path to session export JSON (from /api/session/export)")
args = parser.parse_args()
audio_path = Path(args.audio_file).expanduser().resolve()
@@ -358,12 +359,28 @@ def main():
episode_number = get_next_episode_number()
print(f"Episode number: {episode_number}")
# Load session data if provided
session_data = None
if args.session_data:
session_path = Path(args.session_data).expanduser().resolve()
if session_path.exists():
with open(session_path) as f:
session_data = json.load(f)
print(f"Loaded session data: {session_data.get('call_count', 0)} calls")
else:
print(f"Warning: Session data file not found: {session_path}")
# Step 1: Transcribe
transcript = transcribe_audio(str(audio_path))
# Step 2: Generate metadata
metadata = generate_metadata(transcript, episode_number)
# Use session chapters if available (more accurate than LLM-generated)
if session_data and session_data.get("chapters"):
metadata["chapters"] = session_data["chapters"]
print(f" Using {len(metadata['chapters'])} chapters from session data")
# Apply overrides
if args.title:
metadata["title"] = args.title
@@ -374,6 +391,13 @@ def main():
chapters_path = audio_path.with_suffix(".chapters.json")
save_chapters(metadata, str(chapters_path))
# Save transcript alongside episode if session data available
if session_data and session_data.get("transcript"):
transcript_path = audio_path.with_suffix(".transcript.txt")
with open(transcript_path, "w") as f:
f.write(session_data["transcript"])
print(f" Transcript saved to: {transcript_path}")
if args.dry_run:
print("\n[DRY RUN] Would publish with:")
print(f" Title: {metadata['title']}")