Compare commits

...

5 Commits

Author SHA1 Message Date
356bf145b8 Add show improvement features: crossfade, emotions, returning callers, transcripts, screening
- Music crossfade: smooth 3-second blend between tracks instead of hard stop/start
- Emotional detection: analyze host mood from recent messages so callers adapt tone
- AI caller summaries: generate call summaries with timestamps for show history
- Returning callers: persist regular callers across sessions with call history
- Session export: generate transcripts with speaker labels and chapter markers
- Caller screening: AI pre-screens phone callers to get name and topic while queued

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 02:43:01 -07:00
de5577e582 Add local food opinions, nostalgia, and show history reactions
Callers now have strong food opinions (Sparky's green chile, Blake's
Lotaburger, etc.), nostalgic memories of how their town used to be,
and 60% chance of having a strong reaction to a previous caller that
they bring up early in the call by name.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 01:51:39 -07:00
c789069f6f Add weather, time, season, and situational context for callers
Weather lookup via Open-Meteo API with 30-min cache, time/day
awareness (Mountain time), moon phase calculation, seasonal context
with local events, and probabilistic situational details: road
context, phone situation, background music, recent errands, TV tonight.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 01:46:27 -07:00
bd6c8ccbab Landing page: testimonials slider, how-it-works page, 25 TTS voices
- Add testimonial slider with 8 fake caller reviews
- Add how-it-works page with visual architecture diagram
- Expand voice pools: Inworld 25 voices (14M/11F), ElevenLabs 22 (14M/8F)
- Voice pools auto-switch when TTS provider changes
- Add cover art locally, update cache-busted image refs
- Add "More from Luke" footer links (MMG, prints, YouTube)
- Ad channel configurable in settings UI

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 01:34:30 -07:00
f654a5cbb1 Deep caller personality: named people, memories, vehicles, opinions, arcs
- Named relationships (20M/20F): "my buddy Ray", "my wife Linda" — not generic
- Relationship status with detail: "married 15 years, second marriage"
- Vehicle they drive: rural southwest flavor (F-150s, Tacomas, old Broncos)
- What they were doing before calling: grounds call in a physical moment
- Specific memory/story to reference: flash floods, poker wins, desert nights
- Food/drink right now: Tecate on the porch, third cup of coffee
- Strong random opinions: speed limits, green chile, desert philosophy
- Contradictions/secrets: tough guy who cries at TV, reads physics at work
- Verbal fingerprints: 2 specific phrases per caller
- Emotional arcs: mood shifts during the call
- Show relationship: first-timer, regular, skeptic, reactive
- Late-night reasons: why they're awake
- Topic drift tendencies for some callers
- Regional speech patterns in prompt (over in, down the road, out here)
- Opening line variety based on personality
- Local town news enrichment via SearXNG
- Ad channel now configurable in settings UI

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 01:01:32 -07:00
20 changed files with 5456 additions and 109 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -53,6 +53,14 @@ class AudioService:
self._music_volume: float = 0.3 self._music_volume: float = 0.3
self._music_loop: bool = True self._music_loop: bool = True
# Music crossfade state
self._crossfade_active: bool = False
self._crossfade_old_data: Optional[np.ndarray] = None
self._crossfade_old_position: int = 0
self._crossfade_progress: float = 0.0
self._crossfade_samples: int = 0
self._crossfade_step: float = 0.0
# Caller playback state # Caller playback state
self._caller_stop_event = threading.Event() self._caller_stop_event = threading.Event()
self._caller_thread: Optional[threading.Thread] = None self._caller_thread: Optional[threading.Thread] = None
@@ -135,6 +143,7 @@ class AudioService:
live_caller_channel: Optional[int] = None, live_caller_channel: Optional[int] = None,
music_channel: Optional[int] = None, music_channel: Optional[int] = None,
sfx_channel: Optional[int] = None, sfx_channel: Optional[int] = None,
ad_channel: Optional[int] = None,
phone_filter: Optional[bool] = None phone_filter: Optional[bool] = None
): ):
"""Configure audio devices and channels""" """Configure audio devices and channels"""
@@ -152,6 +161,8 @@ class AudioService:
self.music_channel = music_channel self.music_channel = music_channel
if sfx_channel is not None: if sfx_channel is not None:
self.sfx_channel = sfx_channel self.sfx_channel = sfx_channel
if ad_channel is not None:
self.ad_channel = ad_channel
if phone_filter is not None: if phone_filter is not None:
self.phone_filter = phone_filter self.phone_filter = phone_filter
@@ -168,6 +179,7 @@ class AudioService:
"live_caller_channel": self.live_caller_channel, "live_caller_channel": self.live_caller_channel,
"music_channel": self.music_channel, "music_channel": self.music_channel,
"sfx_channel": self.sfx_channel, "sfx_channel": self.sfx_channel,
"ad_channel": self.ad_channel,
"phone_filter": self.phone_filter, "phone_filter": self.phone_filter,
} }
@@ -574,6 +586,55 @@ class AudioService:
print(f"Failed to load music: {e}") print(f"Failed to load music: {e}")
return False return False
def crossfade_to(self, file_path: str, duration: float = 3.0):
    """Crossfade from the current music track to a new one.

    Loads `file_path`, resamples it to the output device's sample rate,
    then swaps it in as the active track while keeping the previous
    track's buffer so the audio callback can blend the two over
    `duration` seconds.  If nothing is currently playing, this degrades
    to a plain load-and-play with no fade.

    NOTE(review): the state swap below is read concurrently by the audio
    callback; the assignment order (old buffers first, _crossfade_active
    set last) appears deliberate — preserve it.
    """
    import librosa
    # No active track: just start the new one normally (no fade possible).
    if not self._music_playing or self._music_resampled is None:
        if self.load_music(file_path):
            self.play_music()
        return
    # Load the new track
    path = Path(file_path)
    if not path.exists():
        print(f"Music file not found: {file_path}")
        return
    try:
        # Decode to mono float32 at the service's nominal sample rate.
        audio, sr = librosa.load(str(path), sr=self.output_sample_rate, mono=True)
        new_data = audio.astype(np.float32)
    except Exception as e:
        print(f"Failed to load music for crossfade: {e}")
        return
    # Get device sample rate for resampling
    if self.output_device is not None:
        device_info = sd.query_devices(self.output_device)
        device_sr = int(device_info['default_samplerate'])
    else:
        device_sr = self.output_sample_rate
    if self.output_sample_rate != device_sr:
        new_resampled = librosa.resample(new_data, orig_sr=self.output_sample_rate, target_sr=device_sr)
    else:
        # Copy so the playback buffer is independent of the decode buffer.
        new_resampled = new_data.copy()
    # Swap: current becomes old, new becomes current
    self._crossfade_old_data = self._music_resampled
    self._crossfade_old_position = self._music_position
    self._music_resampled = new_resampled
    self._music_data = new_data
    self._music_position = 0
    # Configure crossfade timing
    # Fade length in device-rate samples; step guards against duration <= 0.
    self._crossfade_samples = int(device_sr * duration)
    self._crossfade_progress = 0.0
    self._crossfade_step = 1.0 / self._crossfade_samples if self._crossfade_samples > 0 else 1.0
    self._crossfade_active = True
    print(f"Crossfading to {path.name} over {duration}s")
def play_music(self): def play_music(self):
"""Start music playback to specific channel""" """Start music playback to specific channel"""
import librosa import librosa
@@ -621,25 +682,55 @@ class AudioService:
if not self._music_playing or self._music_resampled is None: if not self._music_playing or self._music_resampled is None:
return return
# Read new track samples
end_pos = self._music_position + frames end_pos = self._music_position + frames
if end_pos <= len(self._music_resampled): if end_pos <= len(self._music_resampled):
outdata[:, channel_idx] = self._music_resampled[self._music_position:end_pos] * self._music_volume new_samples = self._music_resampled[self._music_position:end_pos].copy()
self._music_position = end_pos self._music_position = end_pos
else: else:
remaining = len(self._music_resampled) - self._music_position remaining = len(self._music_resampled) - self._music_position
new_samples = np.zeros(frames, dtype=np.float32)
if remaining > 0: if remaining > 0:
outdata[:remaining, channel_idx] = self._music_resampled[self._music_position:] * self._music_volume new_samples[:remaining] = self._music_resampled[self._music_position:]
if self._music_loop: if self._music_loop:
self._music_position = 0
wrap_frames = frames - remaining wrap_frames = frames - remaining
if wrap_frames > 0: if wrap_frames > 0:
outdata[remaining:, channel_idx] = self._music_resampled[:wrap_frames] * self._music_volume new_samples[remaining:] = self._music_resampled[:wrap_frames]
self._music_position = wrap_frames self._music_position = wrap_frames
else: else:
self._music_position = len(self._music_resampled)
if remaining <= 0:
self._music_playing = False self._music_playing = False
if self._crossfade_active and self._crossfade_old_data is not None:
# Read old track samples
old_end = self._crossfade_old_position + frames
if old_end <= len(self._crossfade_old_data):
old_samples = self._crossfade_old_data[self._crossfade_old_position:old_end]
self._crossfade_old_position = old_end
else:
old_remaining = len(self._crossfade_old_data) - self._crossfade_old_position
old_samples = np.zeros(frames, dtype=np.float32)
if old_remaining > 0:
old_samples[:old_remaining] = self._crossfade_old_data[self._crossfade_old_position:]
self._crossfade_old_position = len(self._crossfade_old_data)
# Compute fade curves for this chunk
start_progress = self._crossfade_progress
end_progress = min(1.0, start_progress + self._crossfade_step * frames)
fade_in = np.linspace(start_progress, end_progress, frames, dtype=np.float32)
fade_out = 1.0 - fade_in
outdata[:, channel_idx] = (old_samples * fade_out + new_samples * fade_in) * self._music_volume
self._crossfade_progress = end_progress
if self._crossfade_progress >= 1.0:
self._crossfade_active = False
self._crossfade_old_data = None
print("Crossfade complete")
else:
outdata[:, channel_idx] = new_samples * self._music_volume
try: try:
self._music_stream = sd.OutputStream( self._music_stream = sd.OutputStream(
device=device, device=device,
@@ -655,15 +746,48 @@ class AudioService:
print(f"Music playback error: {e}") print(f"Music playback error: {e}")
self._music_playing = False self._music_playing = False
def stop_music(self): def stop_music(self, fade_duration: float = 2.0):
"""Stop music playback""" """Stop music playback with fade out"""
if not self._music_playing or not self._music_stream:
self._music_playing = False self._music_playing = False
if self._music_stream: if self._music_stream:
self._music_stream.stop() self._music_stream.stop()
self._music_stream.close() self._music_stream.close()
self._music_stream = None self._music_stream = None
self._music_position = 0 self._music_position = 0
return
if fade_duration <= 0:
self._music_playing = False
self._music_stream.stop()
self._music_stream.close()
self._music_stream = None
self._music_position = 0
print("Music stopped") print("Music stopped")
return
import threading
original_volume = self._music_volume
steps = 20
step_time = fade_duration / steps
def _fade():
for i in range(steps):
if not self._music_playing:
break
self._music_volume = original_volume * (1 - (i + 1) / steps)
import time
time.sleep(step_time)
self._music_playing = False
if self._music_stream:
self._music_stream.stop()
self._music_stream.close()
self._music_stream = None
self._music_position = 0
self._music_volume = original_volume
print("Music faded out and stopped")
threading.Thread(target=_fade, daemon=True).start()
def play_ad(self, file_path: str): def play_ad(self, file_path: str):
"""Load and play an ad file once (no loop) on the ad channel""" """Load and play an ad file once (no loop) on the ad channel"""

View File

@@ -25,6 +25,7 @@ class CallerService:
self._stream_sids: dict[str, str] = {} # caller_id -> SignalWire streamSid self._stream_sids: dict[str, str] = {} # caller_id -> SignalWire streamSid
self._send_locks: dict[str, asyncio.Lock] = {} # per-caller send lock self._send_locks: dict[str, asyncio.Lock] = {} # per-caller send lock
self._streaming_tts: set[str] = set() # caller_ids currently receiving TTS self._streaming_tts: set[str] = set() # caller_ids currently receiving TTS
self._screening_state: dict[str, dict] = {} # caller_id -> screening conversation
def _get_send_lock(self, caller_id: str) -> asyncio.Lock: def _get_send_lock(self, caller_id: str) -> asyncio.Lock:
if caller_id not in self._send_locks: if caller_id not in self._send_locks:
@@ -51,18 +52,6 @@ class CallerService:
self._queue = [c for c in self._queue if c["caller_id"] != caller_id] self._queue = [c for c in self._queue if c["caller_id"] != caller_id]
print(f"[Caller] {caller_id} removed from queue") print(f"[Caller] {caller_id} removed from queue")
def get_queue(self) -> list[dict]:
now = time.time()
with self._lock:
return [
{
"caller_id": c["caller_id"],
"phone": c["phone"],
"wait_time": int(now - c["queued_at"]),
}
for c in self._queue
]
def allocate_channel(self) -> int: def allocate_channel(self) -> int:
with self._lock: with self._lock:
ch = self.FIRST_REAL_CHANNEL ch = self.FIRST_REAL_CHANNEL
@@ -111,6 +100,7 @@ class CallerService:
self._call_sids.pop(caller_id, None) self._call_sids.pop(caller_id, None)
self._stream_sids.pop(caller_id, None) self._stream_sids.pop(caller_id, None)
self._send_locks.pop(caller_id, None) self._send_locks.pop(caller_id, None)
self._screening_state.pop(caller_id, None)
def reset(self): def reset(self):
with self._lock: with self._lock:
@@ -125,8 +115,72 @@ class CallerService:
self._stream_sids.clear() self._stream_sids.clear()
self._send_locks.clear() self._send_locks.clear()
self._streaming_tts.clear() self._streaming_tts.clear()
self._screening_state.clear()
print("[Caller] Service reset") print("[Caller] Service reset")
# --- Screening ---
def start_screening(self, caller_id: str):
    """Create a fresh screening record for a caller waiting in the queue."""
    fresh_state = dict(
        conversation=[],
        caller_name=None,
        topic=None,
        status="screening",  # flips to "complete" via end_screening()
        response_count=0,
    )
    self._screening_state[caller_id] = fresh_state
    print(f"[Screening] Started for {caller_id}")
def get_screening_state(self, caller_id: str) -> Optional[dict]:
    """Return the screening record for *caller_id*, or None if none exists."""
    state = self._screening_state.get(caller_id)
    return state
def update_screening(self, caller_id: str, caller_text: Optional[str] = None,
                     screener_text: Optional[str] = None, caller_name: Optional[str] = None,
                     topic: Optional[str] = None):
    """Update screening conversation and extracted info.

    Appends caller/screener utterances to the screening transcript and
    records any name/topic the screener has extracted so far.  A no-op
    if screening was never started for this caller.

    Note: falsy values (e.g. empty transcription chunks) are
    intentionally ignored so they do not pollute the conversation log.
    Annotations fixed to Optional[str] to match the None defaults.
    """
    state = self._screening_state.get(caller_id)
    if not state:
        return
    if caller_text:
        state["conversation"].append({"role": "caller", "content": caller_text})
        state["response_count"] += 1
    if screener_text:
        state["conversation"].append({"role": "screener", "content": screener_text})
    if caller_name:
        state["caller_name"] = caller_name
    if topic:
        state["topic"] = topic
def end_screening(self, caller_id: str):
    """Flag a caller's screening record as finished (no-op if absent)."""
    state = self._screening_state.get(caller_id)
    if not state:
        return
    state["status"] = "complete"
    print(f"[Screening] Complete for {caller_id}: name={state.get('caller_name')}, topic={state.get('topic')}")
def get_queue(self) -> list[dict]:
    """Snapshot of the hold queue, each entry enriched with screening info."""
    now = time.time()
    with self._lock:
        snapshot = []
        for caller in self._queue:
            cid = caller["caller_id"]
            screening = self._screening_state.get(cid)
            has_info = bool(screening)
            snapshot.append({
                "caller_id": cid,
                "phone": caller["phone"],
                "wait_time": int(now - caller["queued_at"]),
                "screening_status": screening["status"] if has_info else None,
                "caller_name": screening.get("caller_name") if has_info else None,
                "screening_summary": screening.get("topic") if has_info else None,
            })
        return snapshot
def register_websocket(self, caller_id: str, websocket): def register_websocket(self, caller_id: str, websocket):
"""Register a WebSocket for a caller""" """Register a WebSocket for a caller"""
self._websockets[caller_id] = websocket self._websockets[caller_id] = websocket

View File

@@ -0,0 +1,95 @@
"""Returning caller persistence service"""
import json
import time
import uuid
from pathlib import Path
from typing import Optional
DATA_FILE = Path(__file__).parent.parent.parent / "data" / "regulars.json"
MAX_REGULARS = 12


class RegularCallerService:
    """Manages persistent 'regular' callers who return across sessions."""

    def __init__(self):
        # In-memory roster; mirrored to DATA_FILE on every mutation.
        self._regulars: list[dict] = []
        self._load()

    def _load(self):
        """Read the roster from disk, tolerating a missing or corrupt file."""
        if not DATA_FILE.exists():
            return
        try:
            with open(DATA_FILE) as f:
                payload = json.load(f)
            self._regulars = payload.get("regulars", [])
            print(f"[Regulars] Loaded {len(self._regulars)} regular callers")
        except Exception as e:
            print(f"[Regulars] Failed to load: {e}")
            self._regulars = []

    def _save(self):
        """Persist the roster; failures are logged, never raised."""
        try:
            DATA_FILE.parent.mkdir(parents=True, exist_ok=True)
            with open(DATA_FILE, "w") as f:
                json.dump({"regulars": self._regulars}, f, indent=2)
        except Exception as e:
            print(f"[Regulars] Failed to save: {e}")

    def get_regulars(self) -> list[dict]:
        """Shallow copy of the roster (callers can't mutate our list)."""
        return self._regulars.copy()

    def get_returning_callers(self, count: int = 2) -> list[dict]:
        """Get up to `count` regulars for returning caller slots"""
        import random
        # Only regulars with at least one prior call are eligible.
        eligible = [r for r in self._regulars if r.get("call_history")]
        if not eligible:
            return []
        return random.sample(eligible, min(count, len(eligible)))

    def add_regular(self, name: str, gender: str, age: int, job: str,
                    location: str, personality_traits: list[str],
                    first_call_summary: str) -> dict:
        """Promote a first-time caller to regular"""
        # Roster is capped: retire the least-recently-heard regular first.
        if len(self._regulars) >= MAX_REGULARS:
            self._regulars.sort(key=lambda r: r.get("last_call", 0))
            retired = self._regulars.pop(0)
            print(f"[Regulars] Retired {retired['name']} to make room")
        regular = {
            "id": str(uuid.uuid4())[:8],
            "name": name,
            "gender": gender,
            "age": age,
            "job": job,
            "location": location,
            "personality_traits": personality_traits,
            "call_history": [
                {"summary": first_call_summary, "timestamp": time.time()}
            ],
            "last_call": time.time(),
            "created_at": time.time(),
        }
        self._regulars.append(regular)
        self._save()
        print(f"[Regulars] Promoted {name} to regular (total: {len(self._regulars)})")
        return regular

    def update_after_call(self, regular_id: str, call_summary: str):
        """Update a regular's history after a returning call"""
        match = next((r for r in self._regulars if r["id"] == regular_id), None)
        if match is None:
            print(f"[Regulars] Regular {regular_id} not found for update")
            return
        history = match.setdefault("call_history", [])
        history.append({"summary": call_summary, "timestamp": time.time()})
        match["last_call"] = time.time()
        self._save()
        print(f"[Regulars] Updated {match['name']} call history ({len(history)} calls)")


regular_caller_service = RegularCallerService()

View File

@@ -577,7 +577,12 @@ async def generate_speech_inworld(text: str, voice_id: str) -> tuple[np.ndarray,
import base64 import base64
import librosa import librosa
voice = INWORLD_VOICES.get(voice_id, DEFAULT_INWORLD_VOICE) # voice_id is now the Inworld voice name directly (e.g. "Edward")
# Fall back to legacy mapping if it's an ElevenLabs ID
if voice_id in INWORLD_VOICES:
voice = INWORLD_VOICES[voice_id]
else:
voice = voice_id
api_key = settings.inworld_api_key api_key = settings.inworld_api_key
if not api_key: if not api_key:

1
data/regulars.json Normal file
View File

@@ -0,0 +1 @@
{"regulars": []}

View File

@@ -35,7 +35,7 @@ Session Reset / First Access to Caller Slot
_randomize_callers() _randomize_callers()
│ Assigns unique names (from 24M/24F pool) and voices (5M/5F) to 10 slots │ Assigns unique names (from 24M/24F pool) and voices (Inworld: 14M/11F, ElevenLabs: 14M/8F) to 10 slots
generate_caller_background(base) generate_caller_background(base)

View File

@@ -0,0 +1,189 @@
# Real Callers + AI Follow-Up Design
## Overview
Add real phone callers to the AI Radio Show via Twilio, alongside existing AI callers. Real callers dial a phone number, wait in a hold queue, and get taken on air by the host. Three-way conversations between host, real caller, and AI caller are supported. AI follow-up callers automatically reference what real callers said.
## Requirements
- Real callers connect via Twilio phone number
- Full-duplex audio — host and caller talk simultaneously, talk over each other
- Each real caller gets their own dedicated audio channel for recording
- Three-way calls: host + real caller + AI caller all live at once
- AI caller can respond manually (host-triggered) or automatically (listens and decides when to jump in)
- AI follow-up callers reference real caller conversations via show history
- Auto follow-up mode: system picks an AI caller and connects them after a real call
- Simple hold queue — callers wait with hold music, host sees list and picks who goes on air
- Twilio webhooks exposed via Cloudflare tunnel
## Architecture
### Audio Routing (Loopback Channels)
```
Ch 1: Host mic (existing)
Ch 2: AI callers / TTS (existing)
Ch 3+: Real callers (dynamically assigned per call)
Ch N-1: Music (existing)
Ch N: SFX (existing)
```
### Call Flow — Real Caller
```
Caller dials Twilio number
→ Twilio POST /api/twilio/voice
→ TwiML response: greeting + enqueue with hold music
→ Caller waits in hold queue
→ Host sees caller in dashboard queue panel
→ Host clicks "Take Call"
→ POST /api/queue/take/{call_sid}
→ Twilio opens WebSocket to /api/twilio/stream
→ Bidirectional audio:
Caller audio → decode mulaw → dedicated Loopback channel
Host audio + AI TTS → encode mulaw → Twilio → caller hears both
→ Real-time Whisper transcription of caller audio
→ Host hangs up → call summarized → stored in show history
```
### Three-Way Call Flow
```
Host mic ──────→ Ch 1 (recording)
→ Twilio outbound (real caller hears you)
→ Whisper transcription (AI gets your words)
Real caller ──→ Ch 3+ (recording, dedicated channel)
→ Whisper transcription (AI gets their words)
→ Host headphones
AI TTS ───────→ Ch 2 (recording)
→ Twilio outbound (real caller hears AI)
→ Host headphones (already works)
```
Conversation history becomes three-party with role labels: `host`, `real_caller`, `ai_caller`.
### AI Auto-Respond Mode
When toggled on, after each real caller transcription chunk:
1. Lightweight LLM call ("should I respond?" — use fast model like Haiku)
2. If YES → full response generated → TTS → plays on AI channel + streams to Twilio
3. Cooldown (~10s) prevents rapid-fire
4. Host can override with mute button
### AI Follow-Up System
After a real caller hangs up:
1. Full transcript (host + real caller + any AI) summarized by LLM
2. Summary stored in `session.call_history`
3. Next AI caller's system prompt includes show history:
```
EARLIER IN THE SHOW:
- Dave (real caller) called about his wife leaving after 12 years.
He got emotional about his kids.
- Jasmine called about her boss hitting on her at work.
You can reference these if it feels natural. Don't force it.
```
**Host-triggered (default):** Click any AI caller as normal. They already have show context.
**Auto mode:** After real caller hangs up, system waits ~5-10s, picks a fitting AI caller via short LLM call, biases their background generation toward the topic, auto-connects.
## Backend Changes
### New Module: `backend/services/twilio_service.py`
Manages Twilio integration:
- WebSocket handler for Media Streams (decode/encode mulaw 8kHz ↔ PCM)
- Call queue state (waiting callers, SIDs, timestamps, assigned channels)
- Channel pool management (allocate/release Loopback channels for real callers)
- Outbound audio mixing (host + AI TTS → mulaw → Twilio)
- Methods: `take_call()`, `hangup_real_caller()`, `get_queue()`, `send_audio_to_caller()`
### New Endpoints
```python
# Twilio webhooks
POST /api/twilio/voice # Incoming call → TwiML (greet + enqueue)
POST /api/twilio/hold-music # Hold music TwiML for waiting callers
WS /api/twilio/stream # Media Streams WebSocket (bidirectional audio)
# Host controls
GET /api/queue # List waiting callers (number, wait time)
POST /api/queue/take/{call_sid} # Dequeue caller → start media stream
POST /api/queue/drop/{call_sid} # Drop caller from queue
# AI follow-up
POST /api/followup/generate # Summarize last real call, trigger AI follow-up
```
### Session Model Changes
```python
class CallRecord:
caller_type: str # "ai" or "real"
caller_name: str # "Tony" or "Caller #3"
summary: str # LLM-generated summary after hangup
transcript: list[dict] # Full conversation [{role, content}]
class Session:
# Existing fields...
call_history: list[CallRecord] # All calls this episode
active_real_caller: dict | None # {call_sid, phone, channel, name}
active_ai_caller: str | None # Caller key
ai_respond_mode: str # "manual" or "auto"
auto_followup: bool # Auto-generate AI follow-up after real calls
```
Three-party conversation history uses roles: `host`, `real_caller:{name}`, `ai_caller:{name}`.
### AI Caller Prompt Changes
`get_caller_prompt()` extended to include:
- Show history from `session.call_history`
- Current real caller context (if three-way call active)
- Instructions for referencing real callers naturally
## Frontend Changes
### New: Call Queue Panel
Between callers section and chat. Shows waiting real callers with phone number and wait time. "Take Call" and "Drop" buttons per caller. Polls `/api/queue` every few seconds.
### Modified: Active Call Indicator
Shows real caller and AI caller simultaneously when both active:
- Real caller: name, channel number, call duration, hang up button
- AI caller: name, Manual/Auto toggle, "Let [name] respond" button (manual mode)
- Auto Follow-Up checkbox
### Modified: Chat Log
Three-party with visual distinction:
- Host messages: existing style
- Real caller: labeled "Dave (caller)", distinct color
- AI caller: labeled "Tony (AI)", distinct color
### Modified: Caller Grid
When real caller is active, clicking an AI caller adds them as third party instead of starting fresh call. Indicator shows which AI callers have been on the show this session.
## Dependencies
- `twilio` Python package (for TwiML generation, REST API)
- Twilio account with phone number (~$1.15/mo + per-minute)
- Cloudflare tunnel for exposing webhook endpoints
- `audioop` or equivalent for mulaw encode/decode (in the stdlib through Python 3.12, but deprecated by PEP 594 and removed in 3.13 — pin the Python version or use a drop-in replacement package)
## Configuration
New env vars in `.env`:
```
TWILIO_ACCOUNT_SID=...
TWILIO_AUTH_TOKEN=...
TWILIO_PHONE_NUMBER=+1...
TWILIO_WEBHOOK_BASE_URL=https://your-tunnel.cloudflare.com
```

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,52 @@
# SignalWire Phone Call-In Design
## Goal
Replace browser-based WebSocket call-in with real phone calls via SignalWire. Callers dial 208-439-5853 and enter the show queue.
## Architecture
SignalWire handles PSTN connectivity. When a call comes in, SignalWire hits our webhook, we return XML telling it to open a bidirectional WebSocket stream with L16@16kHz audio. The audio flows through our existing pipeline — same queue, channel allocation, transcription, host mic streaming, and TTS streaming.
## Call Flow
1. Caller dials 208-439-5853
2. SignalWire hits `POST /api/signalwire/voice` (via Cloudflare tunnel)
3. We return `<Connect><Stream codec="L16@16000h">` XML
4. SignalWire opens WebSocket to `/api/signalwire/stream`
5. Caller enters queue — host sees phone number on dashboard
6. Host takes call — audio flows bidirectionally
7. Host hangs up — we call SignalWire REST API to end the phone call
## Audio Path
```
Phone → PSTN → SignalWire → WebSocket (base64 L16 JSON) → Our server
Our server → WebSocket (base64 L16 JSON) → SignalWire → PSTN → Phone
```
## SignalWire WebSocket Protocol
Incoming: `{"event": "media", "media": {"payload": "<base64 L16 PCM 16kHz>"}}`
Outgoing: `{"event": "media", "media": {"payload": "<base64 L16 PCM 16kHz>"}}`
Start: `{"event": "start", "start": {"streamSid": "...", "callSid": "..."}}`
Stop: `{"event": "stop"}`
## What Changes
- Remove: browser call-in page, browser WebSocket handler
- Add: SignalWire webhook + WebSocket handler, hangup via REST API
- Modify: CallerService (name→phone, base64 JSON encoding for send), dashboard (show phone number)
- Unchanged: AudioService, queue logic, transcription, TTS streaming, three-way calls
## Config
```
SIGNALWIRE_PROJECT_ID=8eb54732-ade3-4487-8b40-ecd2cd680df7
SIGNALWIRE_SPACE=macneil-media-group-llc.signalwire.com
SIGNALWIRE_TOKEN=<redacted — load from the environment; never commit live API tokens to documentation>
SIGNALWIRE_PHONE=+12084395853
```
Webhook URL: `https://radioshow.macneilmediagroup.com/api/signalwire/voice`
No SDK needed — httpx for the one REST call (hangup).

View File

@@ -0,0 +1,855 @@
# SignalWire Phone Call-In Implementation Plan
> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
**Goal:** Replace browser-based WebSocket call-in with real phone calls via SignalWire (208-439-5853).
**Architecture:** SignalWire hits our webhook on inbound calls, we return XML to open a bidirectional WebSocket stream with L16@16kHz audio. The existing queue, channel allocation, transcription, host mic streaming, and TTS streaming are reused — only the WebSocket message format changes (base64 JSON instead of raw binary).
**Tech Stack:** Python/FastAPI, SignalWire Compatibility API (LaML XML + WebSocket), httpx for REST calls, existing audio pipeline.
---
## Task 1: Add SignalWire Config
**Files:**
- Modify: `backend/config.py`
- Modify: `.env`
**Step 1: Add SignalWire settings to config.py**
In `backend/config.py`, add these fields to the `Settings` class after the existing API keys block (after line 16):
```python
# SignalWire
signalwire_project_id: str = os.getenv("SIGNALWIRE_PROJECT_ID", "")
signalwire_space: str = os.getenv("SIGNALWIRE_SPACE", "")
signalwire_token: str = os.getenv("SIGNALWIRE_TOKEN", "")
signalwire_phone: str = os.getenv("SIGNALWIRE_PHONE", "")
```
**Step 2: Add SignalWire vars to .env**
Append to `.env`:
```
# SignalWire
SIGNALWIRE_PROJECT_ID=8eb54732-ade3-4487-8b40-ecd2cd680df7
SIGNALWIRE_SPACE=macneil-media-group-llc.signalwire.com
SIGNALWIRE_TOKEN=<redacted — load from the environment; never commit live API tokens to documentation>
SIGNALWIRE_PHONE=+12084395853
```
**Step 3: Verify config loads**
```bash
cd /Users/lukemacneil/ai-podcast && python -c "from backend.config import settings; print(settings.signalwire_space)"
```
Expected: `macneil-media-group-llc.signalwire.com`
**Step 4: Commit**
```bash
git add backend/config.py .env
git commit -m "Add SignalWire configuration"
```
---
## Task 2: Update CallerService for SignalWire Protocol
**Files:**
- Modify: `backend/services/caller_service.py`
The CallerService currently sends raw binary PCM frames. SignalWire needs base64-encoded L16 PCM wrapped in JSON. Also swap `name` field to `phone` since callers now have phone numbers.
**Step 1: Update queue to use `phone` instead of `name`**
In `caller_service.py`, make these changes:
1. Update docstring (line 1): `"""Phone caller queue and audio stream service"""`
2. In `add_to_queue` (line 24): Change parameter `name` to `phone`, and update the dict:
```python
def add_to_queue(self, caller_id: str, phone: str):
with self._lock:
self._queue.append({
"caller_id": caller_id,
"phone": phone,
"queued_at": time.time(),
})
print(f"[Caller] {phone} added to queue (ID: {caller_id})")
```
3. In `get_queue` (line 38): Return `phone` instead of `name`:
```python
def get_queue(self) -> list[dict]:
now = time.time()
with self._lock:
return [
{
"caller_id": c["caller_id"],
"phone": c["phone"],
"wait_time": int(now - c["queued_at"]),
}
for c in self._queue
]
```
4. In `take_call` (line 62): Use `phone` instead of `name`:
```python
def take_call(self, caller_id: str) -> dict:
caller = None
with self._lock:
for c in self._queue:
if c["caller_id"] == caller_id:
caller = c
break
if caller:
self._queue = [c for c in self._queue if c["caller_id"] != caller_id]
if not caller:
raise ValueError(f"Caller {caller_id} not in queue")
channel = self.allocate_channel()
self._caller_counter += 1
phone = caller["phone"]
call_info = {
"caller_id": caller_id,
"phone": phone,
"channel": channel,
"started_at": time.time(),
}
self.active_calls[caller_id] = call_info
print(f"[Caller] {phone} taken on air — channel {channel}")
return call_info
```
5. In `hangup` (line 89): Use `phone` instead of `name`:
```python
def hangup(self, caller_id: str):
    """End an active call: free its channel and drop any tracked websocket."""
    info = self.active_calls.pop(caller_id, None)
    if info is not None:
        freed_channel = info["channel"]
        self.release_channel(freed_channel)
        print(f"[Caller] {info['phone']} hung up — channel {freed_channel} released")
    self._websockets.pop(caller_id, None)
```
**Step 2: Update `send_audio_to_caller` for SignalWire JSON format**
Replace the existing `send_audio_to_caller` method with:
```python
async def send_audio_to_caller(self, caller_id: str, pcm_data: bytes, sample_rate: int):
    """Send small audio chunk to caller via SignalWire WebSocket.
    Encodes L16 PCM as base64 JSON per SignalWire protocol.
    """
    socket = self._websockets.get(caller_id)
    if not socket:
        return
    try:
        import base64
        import json
        if sample_rate != 16000:
            # Nearest-neighbor resample to the 16 kHz rate the wire expects.
            samples = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0
            ratio = 16000 / sample_rate
            target_len = int(len(samples) * ratio)
            pick = np.clip((np.arange(target_len) / ratio).astype(int), 0, len(samples) - 1)
            pcm_data = (samples[pick] * 32767).astype(np.int16).tobytes()
        message = {
            "event": "media",
            "media": {"payload": base64.b64encode(pcm_data).decode('ascii')}
        }
        await socket.send_text(json.dumps(message))
    except Exception as e:
        print(f"[Caller] Failed to send audio: {e}")
```
**Step 3: Update `stream_audio_to_caller` for SignalWire JSON format**
Replace the existing `stream_audio_to_caller` method with:
```python
async def stream_audio_to_caller(self, caller_id: str, pcm_data: bytes, sample_rate: int):
    """Stream large audio (TTS) to caller in real-time chunks via SignalWire WebSocket."""
    socket = self._websockets.get(caller_id)
    if not socket:
        return
    self.streaming_tts = True
    try:
        import base64
        import json
        samples = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0
        if sample_rate != 16000:
            # Nearest-neighbor resample to 16 kHz before chunking.
            ratio = 16000 / sample_rate
            target_len = int(len(samples) * ratio)
            pick = np.clip((np.arange(target_len) / ratio).astype(int), 0, len(samples) - 1)
            samples = samples[pick]
        CHUNK = 960  # 60 ms of audio at 16 kHz per message
        offset = 0
        while offset < len(samples):
            # Stop early if the caller's websocket disappeared mid-stream.
            if caller_id not in self._websockets:
                break
            piece = samples[offset:offset + CHUNK]
            offset += CHUNK
            encoded = base64.b64encode((piece * 32767).astype(np.int16).tobytes()).decode('ascii')
            await socket.send_text(json.dumps({
                "event": "media",
                "media": {"payload": encoded}
            }))
            # Pace slightly faster than real time so playback never starves.
            await asyncio.sleep(0.055)
    except Exception as e:
        print(f"[Caller] Failed to stream audio: {e}")
    finally:
        self.streaming_tts = False
```
**Step 4: Remove `notify_caller` and `disconnect_caller` methods**
These sent browser-specific JSON control messages. SignalWire callers are disconnected via REST API (handled in main.py). Delete methods `notify_caller` (line 168) and `disconnect_caller` (line 175). They will be replaced with a REST-based hangup in Task 4.
**Step 5: Add `call_sid` tracking for SignalWire call hangup**
Add a dict to track SignalWire call SIDs so we can end calls via REST:
In `__init__`, after `self._websockets` line, add:
```python
self._call_sids: dict[str, str] = {} # caller_id -> SignalWire callSid
```
Add methods:
```python
def register_call_sid(self, caller_id: str, call_sid: str):
"""Track SignalWire callSid for a caller"""
self._call_sids[caller_id] = call_sid
def get_call_sid(self, caller_id: str) -> str | None:
"""Get SignalWire callSid for a caller"""
return self._call_sids.get(caller_id)
def unregister_call_sid(self, caller_id: str):
"""Remove callSid tracking"""
self._call_sids.pop(caller_id, None)
```
In `reset`, also clear `self._call_sids`:
```python
self._call_sids.clear()
```
In `hangup`, also clean up call_sid:
```python
self._call_sids.pop(caller_id, None)
```
**Step 6: Run existing tests**
```bash
cd /Users/lukemacneil/ai-podcast && python -m pytest tests/test_caller_service.py -v
```
Tests will likely need updates due to the `name` → `phone` rename. Fix any failures.
**Step 7: Commit**
```bash
git add backend/services/caller_service.py
git commit -m "Update CallerService for SignalWire protocol"
```
---
## Task 3: Add SignalWire Voice Webhook
**Files:**
- Modify: `backend/main.py`
**Step 1: Add the voice webhook endpoint**
Add after the existing route definitions (after line 421), replacing the `/call-in` route:
```python
# --- SignalWire Endpoints ---
from fastapi import Request, Response
@app.post("/api/signalwire/voice")
async def signalwire_voice_webhook(request: Request):
    """Handle inbound call from SignalWire — return XML to start bidirectional stream.

    The `From` and `CallSid` form fields come from the outside world, so they
    are XML-escaped before being interpolated into the LaML response to
    prevent markup injection.
    """
    from xml.sax.saxutils import quoteattr

    form = await request.form()
    caller_phone = form.get("From", "Unknown")
    call_sid = form.get("CallSid", "")
    print(f"[SignalWire] Inbound call from {caller_phone} (CallSid: {call_sid})")
    # Build WebSocket URL from the request so the stream points back at this host
    ws_scheme = "wss"
    host = request.headers.get("host", "radioshow.macneilmediagroup.com")
    stream_url = f"{ws_scheme}://{host}/api/signalwire/stream"
    # quoteattr() returns the value already wrapped in quotes with XML
    # metacharacters escaped, so no surrounding quotes are written here.
    xml = f"""<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Connect>
<Stream url="{stream_url}" codec="L16@16000h">
<Parameter name="caller_phone" value={quoteattr(caller_phone)}/>
<Parameter name="call_sid" value={quoteattr(call_sid)}/>
</Stream>
</Connect>
</Response>"""
    return Response(content=xml, media_type="application/xml")
```
**Step 2: Remove the `/call-in` route**
Delete these lines (around line 419-421):
```python
@app.get("/call-in")
async def call_in_page():
return FileResponse(frontend_dir / "call-in.html")
```
**Step 3: Verify server starts**
```bash
cd /Users/lukemacneil/ai-podcast && python -c "from backend.main import app; print('OK')"
```
**Step 4: Commit**
```bash
git add backend/main.py
git commit -m "Add SignalWire voice webhook, remove call-in route"
```
---
## Task 4: Add SignalWire WebSocket Stream Handler
**Files:**
- Modify: `backend/main.py`
This replaces the browser caller WebSocket handler at `/api/caller/stream`.
**Step 1: Replace the browser WebSocket handler**
Delete the entire `caller_audio_stream` function (the `@app.websocket("/api/caller/stream")` handler, lines 807-887).
Add the new SignalWire WebSocket handler:
```python
@app.websocket("/api/signalwire/stream")
async def signalwire_audio_stream(websocket: WebSocket):
    """Handle SignalWire bidirectional audio stream.

    Protocol: JSON messages with an "event" field — "start" carries
    customParameters (caller_phone, call_sid), "media" carries base64-encoded
    L16 PCM at 16 kHz, "stop" ends the stream.
    """
    # Hoisted out of the per-message loop (was re-imported on every media frame).
    import base64

    await websocket.accept()
    caller_id = str(uuid.uuid4())[:8]
    caller_phone = "Unknown"
    call_sid = ""
    audio_buffer = bytearray()
    CHUNK_DURATION_S = 3
    SAMPLE_RATE = 16000
    chunk_samples = CHUNK_DURATION_S * SAMPLE_RATE
    stream_started = False
    try:
        while True:
            raw = await websocket.receive_text()
            msg = json.loads(raw)
            event = msg.get("event")
            if event == "start":
                # customParameters comes as a dict of <Parameter> name/value pairs
                custom = msg.get("start", {}).get("customParameters", {})
                caller_phone = custom.get("caller_phone", "Unknown")
                call_sid = custom.get("call_sid", "")
                stream_started = True
                print(f"[SignalWire WS] Stream started: {caller_phone} (CallSid: {call_sid})")
                # Add to queue and register
                caller_service.add_to_queue(caller_id, caller_phone)
                caller_service.register_websocket(caller_id, websocket)
                if call_sid:
                    caller_service.register_call_sid(caller_id, call_sid)
            elif event == "media" and stream_started:
                # Decode base64 L16 PCM audio
                payload = msg.get("media", {}).get("payload", "")
                if not payload:
                    continue
                pcm_data = base64.b64decode(payload)
                # Only process audio if caller is on air
                call_info = caller_service.active_calls.get(caller_id)
                if not call_info:
                    continue
                audio_buffer.extend(pcm_data)
                # Route to configured live caller Loopback channel
                audio_service.route_real_caller_audio(pcm_data, SAMPLE_RATE)
                # Transcribe when we have enough audio (2 bytes per int16 sample)
                if len(audio_buffer) >= chunk_samples * 2:
                    pcm_chunk = bytes(audio_buffer[:chunk_samples * 2])
                    audio_buffer = audio_buffer[chunk_samples * 2:]
                    asyncio.create_task(
                        _handle_real_caller_transcription(caller_id, pcm_chunk, SAMPLE_RATE)
                    )
            elif event == "stop":
                print(f"[SignalWire WS] Stream stopped: {caller_phone}")
                break
    except WebSocketDisconnect:
        print(f"[SignalWire WS] Disconnected: {caller_id} ({caller_phone})")
    except Exception as e:
        print(f"[SignalWire WS] Error: {e}")
    finally:
        # Always clean up registration state, queue entry, and any active call.
        caller_service.unregister_websocket(caller_id)
        caller_service.unregister_call_sid(caller_id)
        caller_service.remove_from_queue(caller_id)
        if caller_id in caller_service.active_calls:
            caller_service.hangup(caller_id)
        if session.active_real_caller and session.active_real_caller.get("caller_id") == caller_id:
            session.active_real_caller = None
        if len(caller_service.active_calls) == 0:
            audio_service.stop_host_stream()
        # Flush any buffered tail audio so the last partial chunk is transcribed.
        if audio_buffer:
            asyncio.create_task(
                _handle_real_caller_transcription(caller_id, bytes(audio_buffer), SAMPLE_RATE)
            )
```
**Step 2: Commit**
```bash
git add backend/main.py
git commit -m "Add SignalWire WebSocket stream handler, remove browser handler"
```
---
## Task 5: Update Hangup and Queue Endpoints for SignalWire
**Files:**
- Modify: `backend/main.py`
When the host hangs up or drops a caller, we need to end the actual phone call via SignalWire's REST API.
**Step 1: Add SignalWire hangup helper**
Add this function near the top of `main.py` (after imports):
```python
async def _signalwire_end_call(call_sid: str):
    """End a phone call via SignalWire REST API.

    Best-effort: missing config or HTTP failures are logged, never raised,
    so hangup paths can safely fire-and-forget this coroutine.
    """
    # No-op when there is no callSid or SignalWire is not configured.
    if not call_sid or not settings.signalwire_space:
        return
    try:
        # LaML REST endpoint; POSTing Status=completed terminates the call.
        url = f"https://{settings.signalwire_space}/api/laml/2010-04-01/Accounts/{settings.signalwire_project_id}/Calls/{call_sid}"
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.post(
                url,
                data={"Status": "completed"},
                auth=(settings.signalwire_project_id, settings.signalwire_token),
            )
        print(f"[SignalWire] End call {call_sid}: {response.status_code}")
    except Exception as e:
        print(f"[SignalWire] Failed to end call {call_sid}: {e}")
```
Also add `import httpx` at the top of main.py if not already present.
**Step 2: Update `take_call_from_queue`**
In the `take_call_from_queue` endpoint, update `name` references to `phone`:
```python
@app.post("/api/queue/take/{caller_id}")
async def take_call_from_queue(caller_id: str):
    """Take a caller off hold and put them on air.

    Returns 404 when the caller_id is not in the hold queue.
    """
    try:
        call_info = caller_service.take_call(caller_id)
    except ValueError as e:
        # take_call raises ValueError for unknown caller_ids
        raise HTTPException(404, str(e))
    # Record who is live so other endpoints (TTS, hangup) can find them.
    session.active_real_caller = {
        "caller_id": call_info["caller_id"],
        "channel": call_info["channel"],
        "phone": call_info["phone"],
    }
    # Start host mic streaming if this is the first real caller
    if len(caller_service.active_calls) == 1:
        _start_host_audio_sender()
        audio_service.start_host_stream(_host_audio_sync_callback)
    return {
        "status": "on_air",
        "caller": call_info,
    }
```
Note: The `notify_caller` call is removed — SignalWire callers don't need a JSON status message, they're already connected via the phone.
**Step 3: Update `drop_from_queue`**
End the phone call when dropping:
```python
@app.post("/api/queue/drop/{caller_id}")
async def drop_from_queue(caller_id: str):
    """Drop a caller from the queue and end their phone call via SignalWire."""
    # Look up the callSid before the queue entry is removed.
    call_sid = caller_service.get_call_sid(caller_id)
    caller_service.remove_from_queue(caller_id)
    if call_sid:
        await _signalwire_end_call(call_sid)
    return {"status": "dropped"}
```
**Step 4: Update `hangup_real_caller`**
End the phone call when hanging up:
```python
@app.post("/api/hangup/real")
async def hangup_real_caller():
    """Hang up on real caller — disconnect immediately, summarize in background.

    Returns 400 when no real caller is currently on air.
    """
    if not session.active_real_caller:
        raise HTTPException(400, "No active real caller")
    caller_id = session.active_real_caller["caller_id"]
    caller_phone = session.active_real_caller["phone"]
    # Snapshot conversation state now — the background summary task runs after
    # session.active_real_caller is cleared below.
    conversation_snapshot = list(session.conversation)
    auto_followup_enabled = session.auto_followup
    # End the phone call via SignalWire
    # (fetch the callSid before hangup() clears the caller's tracking state)
    call_sid = caller_service.get_call_sid(caller_id)
    caller_service.hangup(caller_id)
    if call_sid:
        # Fire-and-forget: the REST hangup must not block the HTTP response.
        asyncio.create_task(_signalwire_end_call(call_sid))
    # Stop host streaming if no more active callers
    if len(caller_service.active_calls) == 0:
        audio_service.stop_host_stream()
    session.active_real_caller = None
    # Play hangup sound in background
    import threading
    hangup_sound = settings.sounds_dir / "hangup.wav"
    if hangup_sound.exists():
        threading.Thread(target=audio_service.play_sfx, args=(str(hangup_sound),), daemon=True).start()
    # Summarize and store history in background
    asyncio.create_task(
        _summarize_real_call(caller_phone, conversation_snapshot, auto_followup_enabled)
    )
    return {
        "status": "disconnected",
        "caller": caller_phone,
    }
```
**Step 5: Update `_handle_real_caller_transcription`**
Change `caller_name` to `caller_phone`:
```python
async def _handle_real_caller_transcription(caller_id: str, pcm_data: bytes, sample_rate: int):
    """Transcribe one chunk of live caller audio and record it in the conversation."""
    active = caller_service.active_calls.get(caller_id)
    if not active:
        # Caller already hung up or was never taken on air.
        return
    transcript = await transcribe_audio(pcm_data, source_sample_rate=sample_rate)
    if not (transcript and transcript.strip()):
        return
    phone = active["phone"]
    print(f"[Real Caller] {phone}: {transcript}")
    session.add_message(f"real_caller:{phone}", transcript)
    # Let the AI caller respond automatically when that mode is enabled.
    if session.ai_respond_mode == "auto" and session.current_caller_key:
        asyncio.create_task(_check_ai_auto_respond(transcript, phone))
```
**Step 6: Update `_summarize_real_call`**
Change `caller_name` parameter to `caller_phone`:
```python
async def _summarize_real_call(caller_phone: str, conversation: list, auto_followup_enabled: bool):
    """Background task: produce an LLM summary of a finished call and log it."""
    summary = ""
    if conversation:
        lines = [f"{msg['role']}: {msg['content']}" for msg in conversation]
        transcript_text = "\n".join(lines)
        prompt = f"Summarize this radio show call in 1-2 sentences:\n{transcript_text}"
        summary = await llm_service.generate(
            messages=[{"role": "user", "content": prompt}],
            system_prompt="You summarize radio show conversations concisely. Focus on what the caller talked about and any emotional moments.",
        )
    record = CallRecord(
        caller_type="real",
        caller_name=caller_phone,
        summary=summary,
        transcript=conversation,
    )
    session.call_history.append(record)
    print(f"[Real Caller] {caller_phone} call summarized: {summary[:80]}...")
    if auto_followup_enabled:
        await _auto_followup(summary)
```
**Step 7: Update `_check_ai_auto_respond`**
Change parameter name from `real_caller_name` to `real_caller_phone`:
```python
async def _check_ai_auto_respond(real_caller_text: str, real_caller_phone: str):
```
(The body doesn't use the name/phone parameter in any way that needs changing.)
**Step 8: Update TTS streaming references**
In `text_to_speech` endpoint and `_check_ai_auto_respond`, the `session.active_real_caller` dict now uses `phone` instead of `name`. No code change needed for the TTS streaming since it only uses `caller_id`.
**Step 9: Verify server starts**
```bash
cd /Users/lukemacneil/ai-podcast && python -c "from backend.main import app; print('OK')"
```
**Step 10: Commit**
```bash
git add backend/main.py
git commit -m "Update hangup and queue endpoints for SignalWire REST API"
```
---
## Task 6: Update Frontend for Phone Callers
**Files:**
- Modify: `frontend/js/app.js`
- Modify: `frontend/index.html`
**Step 1: Update queue rendering in app.js**
In `renderQueue` function (around line 875), change `caller.name` to `caller.phone`:
```javascript
el.innerHTML = queue.map(caller => {
const mins = Math.floor(caller.wait_time / 60);
const secs = caller.wait_time % 60;
const waitStr = mins > 0 ? `${mins}m ${secs}s` : `${secs}s`;
return `
<div class="queue-item">
<span class="queue-name">${caller.phone}</span>
<span class="queue-wait">waiting ${waitStr}</span>
<button class="queue-take-btn" onclick="takeCall('${caller.caller_id}')">Take Call</button>
<button class="queue-drop-btn" onclick="dropCall('${caller.caller_id}')">Drop</button>
</div>
`;
}).join('');
```
**Step 2: Update `takeCall` log message**
In `takeCall` function (around line 896), change `data.caller.name` to `data.caller.phone`:
```javascript
if (data.status === 'on_air') {
showRealCaller(data.caller);
log(`${data.caller.phone} is on air — Channel ${data.caller.channel}`);
}
```
**Step 3: Update `showRealCaller` to use phone**
In `showRealCaller` function (around line 939):
```javascript
function showRealCaller(callerInfo) {
const nameEl = document.getElementById('real-caller-name');
const chEl = document.getElementById('real-caller-channel');
if (nameEl) nameEl.textContent = callerInfo.phone;
if (chEl) chEl.textContent = `Ch ${callerInfo.channel}`;
```
**Step 4: Update index.html queue section header**
In `frontend/index.html`, change the queue section header (line 56) — remove the call-in page link:
```html
<section class="queue-section">
<h2>Incoming Calls</h2>
<div id="call-queue" class="call-queue">
```
**Step 5: Bump cache version in index.html**
Find the app.js script tag and bump the version:
```html
<script src="/js/app.js?v=13"></script>
```
**Step 6: Commit**
```bash
git add frontend/js/app.js frontend/index.html
git commit -m "Update frontend for phone caller display"
```
---
## Task 7: Remove Browser Call-In Files
**Files:**
- Delete: `frontend/call-in.html`
- Delete: `frontend/js/call-in.js`
**Step 1: Delete files**
```bash
cd /Users/lukemacneil/ai-podcast && rm frontend/call-in.html frontend/js/call-in.js
```
**Step 2: Commit**
```bash
git add frontend/call-in.html frontend/js/call-in.js
git commit -m "Remove browser call-in page"
```
---
## Task 8: Update Tests
**Files:**
- Modify: `tests/test_caller_service.py`
**Step 1: Update tests for `name` → `phone` rename**
Throughout `test_caller_service.py`, change:
- `add_to_queue(caller_id, "TestName")` → `add_to_queue(caller_id, "+15551234567")`
- `caller["name"]` → `caller["phone"]`
- `call_info["name"]` → `call_info["phone"]`
Also remove any tests for `notify_caller` or `disconnect_caller` if they exist, since those methods were removed.
**Step 2: Run all tests**
```bash
cd /Users/lukemacneil/ai-podcast && python -m pytest tests/ -v
```
Expected: All pass.
**Step 3: Commit**
```bash
git add tests/
git commit -m "Update tests for SignalWire phone caller format"
```
---
## Task 9: Configure SignalWire Webhook and End-to-End Test
**Step 1: Start the server**
```bash
cd /Users/lukemacneil/ai-podcast && python -m uvicorn backend.main:app --reload --host 0.0.0.0 --port 8000
```
**Step 2: Verify webhook endpoint responds**
```bash
curl -X POST http://localhost:8000/api/signalwire/voice \
-d "From=+15551234567&CallSid=test123" \
-H "Content-Type: application/x-www-form-urlencoded"
```
Expected: XML response with `<Connect><Stream>` containing the WebSocket URL.
**Step 3: Verify Cloudflare tunnel is running**
```bash
curl -s https://radioshow.macneilmediagroup.com/api/server/status
```
Expected: JSON response with `"status": "running"`.
**Step 4: Configure SignalWire webhook**
In the SignalWire dashboard:
1. Go to Phone Numbers → 208-439-5853
2. Set "When a call comes in" to: `https://radioshow.macneilmediagroup.com/api/signalwire/voice`
3. Method: POST
4. Handler type: LaML Webhooks
**Step 5: Test with a real call**
Call 208-439-5853 from a phone. Expected:
1. Call connects (no ringing/hold — goes straight to stream)
2. Caller appears in queue on host dashboard with phone number
3. Host clicks "Take Call" → audio flows bidirectionally
4. Host clicks "Hang Up" → phone call ends
**Step 6: Commit any fixes needed**
```bash
git add -A
git commit -m "Final SignalWire integration fixes"
```
---
## Summary
| Task | What | Key Files |
|------|------|-----------|
| 1 | SignalWire config | `config.py`, `.env` |
| 2 | CallerService protocol update | `caller_service.py` |
| 3 | Voice webhook endpoint | `main.py` |
| 4 | WebSocket stream handler | `main.py` |
| 5 | Hangup/queue via REST API | `main.py` |
| 6 | Frontend phone display | `app.js`, `index.html` |
| 7 | Remove browser call-in | `call-in.html`, `call-in.js` |
| 8 | Update tests | `tests/` |
| 9 | Configure & test | SignalWire dashboard |
Tasks 1-5 are sequential backend. Task 6-7 are frontend (can parallel after task 5). Task 8 after task 2. Task 9 is final integration test.

View File

@@ -593,6 +593,23 @@ section h2 {
.hangup-btn.small { font-size: 0.75rem; padding: 0.2rem 0.5rem; } .hangup-btn.small { font-size: 0.75rem; padding: 0.2rem 0.5rem; }
.auto-followup-label { display: flex; align-items: center; gap: 0.4rem; font-size: 0.8rem; color: #999; margin-top: 0.5rem; } .auto-followup-label { display: flex; align-items: center; gap: 0.4rem; font-size: 0.8rem; color: #999; margin-top: 0.5rem; }
/* Returning Caller */
.caller-btn.returning {
border-color: #f9a825;
color: #f9a825;
}
.caller-btn.returning:hover {
border-color: #fdd835;
}
/* Screening Badges */
.screening-badge { font-size: 0.7rem; padding: 0.1rem 0.4rem; border-radius: 3px; font-weight: bold; }
.screening-badge.screening { background: #e65100; color: white; animation: pulse 1.5s infinite; }
.screening-badge.screened { background: #2e7d32; color: white; }
.screening-summary { font-size: 0.8rem; color: #aaa; font-style: italic; flex-basis: 100%; margin-top: 0.2rem; }
.queue-item { flex-wrap: wrap; }
/* Three-Party Chat */ /* Three-Party Chat */
.message.real-caller { border-left: 3px solid #c62828; padding-left: 0.5rem; } .message.real-caller { border-left: 3px solid #c62828; padding-left: 0.5rem; }
.message.ai-caller { border-left: 3px solid #1565c0; padding-left: 0.5rem; } .message.ai-caller { border-left: 3px solid #1565c0; padding-left: 0.5rem; }

View File

@@ -13,6 +13,7 @@
<div class="header-buttons"> <div class="header-buttons">
<button id="on-air-btn" class="on-air-btn off">OFF AIR</button> <button id="on-air-btn" class="on-air-btn off">OFF AIR</button>
<button id="new-session-btn" class="new-session-btn">New Session</button> <button id="new-session-btn" class="new-session-btn">New Session</button>
<button id="export-session-btn">Export</button>
<button id="settings-btn">Settings</button> <button id="settings-btn">Settings</button>
</div> </div>
</header> </header>
@@ -142,6 +143,7 @@
<label>Live Ch <input type="number" id="live-caller-channel" value="9" min="1" max="16" class="channel-input"></label> <label>Live Ch <input type="number" id="live-caller-channel" value="9" min="1" max="16" class="channel-input"></label>
<label>Music Ch <input type="number" id="music-channel" value="2" min="1" max="16" class="channel-input"></label> <label>Music Ch <input type="number" id="music-channel" value="2" min="1" max="16" class="channel-input"></label>
<label>SFX Ch <input type="number" id="sfx-channel" value="3" min="1" max="16" class="channel-input"></label> <label>SFX Ch <input type="number" id="sfx-channel" value="3" min="1" max="16" class="channel-input"></label>
<label>Ad Ch <input type="number" id="ad-channel" value="11" min="1" max="16" class="channel-input"></label>
</div> </div>
</div> </div>

View File

@@ -85,6 +85,9 @@ function initEventListeners() {
}); });
} }
// Export session
document.getElementById('export-session-btn')?.addEventListener('click', exportSession);
// Server controls // Server controls
document.getElementById('restart-server-btn')?.addEventListener('click', restartServer); document.getElementById('restart-server-btn')?.addEventListener('click', restartServer);
document.getElementById('stop-server-btn')?.addEventListener('click', stopServer); document.getElementById('stop-server-btn')?.addEventListener('click', stopServer);
@@ -281,12 +284,14 @@ async function loadAudioDevices() {
const liveCallerCh = document.getElementById('live-caller-channel'); const liveCallerCh = document.getElementById('live-caller-channel');
const musicCh = document.getElementById('music-channel'); const musicCh = document.getElementById('music-channel');
const sfxCh = document.getElementById('sfx-channel'); const sfxCh = document.getElementById('sfx-channel');
const adCh = document.getElementById('ad-channel');
if (inputCh) inputCh.value = settings.input_channel || 1; if (inputCh) inputCh.value = settings.input_channel || 1;
if (callerCh) callerCh.value = settings.caller_channel || 1; if (callerCh) callerCh.value = settings.caller_channel || 1;
if (liveCallerCh) liveCallerCh.value = settings.live_caller_channel || 9; if (liveCallerCh) liveCallerCh.value = settings.live_caller_channel || 9;
if (musicCh) musicCh.value = settings.music_channel || 2; if (musicCh) musicCh.value = settings.music_channel || 2;
if (sfxCh) sfxCh.value = settings.sfx_channel || 3; if (sfxCh) sfxCh.value = settings.sfx_channel || 3;
if (adCh) adCh.value = settings.ad_channel || 11;
// Phone filter setting // Phone filter setting
const phoneFilterEl = document.getElementById('phone-filter'); const phoneFilterEl = document.getElementById('phone-filter');
@@ -310,6 +315,7 @@ async function saveAudioDevices() {
const liveCallerChannel = document.getElementById('live-caller-channel')?.value; const liveCallerChannel = document.getElementById('live-caller-channel')?.value;
const musicChannel = document.getElementById('music-channel')?.value; const musicChannel = document.getElementById('music-channel')?.value;
const sfxChannel = document.getElementById('sfx-channel')?.value; const sfxChannel = document.getElementById('sfx-channel')?.value;
const adChannel = document.getElementById('ad-channel')?.value;
const phoneFilterChecked = document.getElementById('phone-filter')?.checked ?? false; const phoneFilterChecked = document.getElementById('phone-filter')?.checked ?? false;
await fetch('/api/audio/settings', { await fetch('/api/audio/settings', {
@@ -323,6 +329,7 @@ async function saveAudioDevices() {
live_caller_channel: liveCallerChannel ? parseInt(liveCallerChannel) : 9, live_caller_channel: liveCallerChannel ? parseInt(liveCallerChannel) : 9,
music_channel: musicChannel ? parseInt(musicChannel) : 2, music_channel: musicChannel ? parseInt(musicChannel) : 2,
sfx_channel: sfxChannel ? parseInt(sfxChannel) : 3, sfx_channel: sfxChannel ? parseInt(sfxChannel) : 3,
ad_channel: adChannel ? parseInt(adChannel) : 11,
phone_filter: phoneFilterChecked phone_filter: phoneFilterChecked
}) })
}); });
@@ -347,7 +354,8 @@ async function loadCallers() {
data.callers.forEach(caller => { data.callers.forEach(caller => {
const btn = document.createElement('button'); const btn = document.createElement('button');
btn.className = 'caller-btn'; btn.className = 'caller-btn';
btn.textContent = caller.name; if (caller.returning) btn.classList.add('returning');
btn.textContent = caller.returning ? `\u2605 ${caller.name}` : caller.name;
btn.dataset.key = caller.key; btn.dataset.key = caller.key;
btn.addEventListener('click', () => startCall(caller.key, caller.name)); btn.addEventListener('click', () => startCall(caller.key, caller.name));
grid.appendChild(btn); grid.appendChild(btn);
@@ -992,10 +1000,21 @@ function renderQueue(queue) {
const mins = Math.floor(caller.wait_time / 60); const mins = Math.floor(caller.wait_time / 60);
const secs = caller.wait_time % 60; const secs = caller.wait_time % 60;
const waitStr = mins > 0 ? `${mins}m ${secs}s` : `${secs}s`; const waitStr = mins > 0 ? `${mins}m ${secs}s` : `${secs}s`;
const displayName = caller.caller_name || caller.phone;
const screenBadge = caller.screening_status === 'complete'
? '<span class="screening-badge screened">Screened</span>'
: caller.screening_status === 'screening'
? '<span class="screening-badge screening">Screening...</span>'
: '';
const summary = caller.screening_summary
? `<div class="screening-summary">${caller.screening_summary}</div>`
: '';
return ` return `
<div class="queue-item"> <div class="queue-item">
<span class="queue-name">${caller.phone}</span> <span class="queue-name">${displayName}</span>
${screenBadge}
<span class="queue-wait">waiting ${waitStr}</span> <span class="queue-wait">waiting ${waitStr}</span>
${summary}
<button class="queue-take-btn" onclick="takeCall('${caller.caller_id}')">Take Call</button> <button class="queue-take-btn" onclick="takeCall('${caller.caller_id}')">Take Call</button>
<button class="queue-drop-btn" onclick="dropCall('${caller.caller_id}')">Drop</button> <button class="queue-drop-btn" onclick="dropCall('${caller.caller_id}')">Drop</button>
</div> </div>
@@ -1151,6 +1170,23 @@ async function fetchConversationUpdates() {
} }
async function exportSession() {
try {
const res = await safeFetch('/api/session/export');
const blob = new Blob([JSON.stringify(res, null, 2)], { type: 'application/json' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `session-${res.session_id}.json`;
a.click();
URL.revokeObjectURL(url);
log(`Exported session: ${res.call_count} calls`);
} catch (err) {
log('Export error: ' + err.message);
}
}
async function stopServer() { async function stopServer() {
if (!confirm('Stop the server? You will need to restart it manually.')) return; if (!confirm('Stop the server? You will need to restart it manually.')) return;

View File

@@ -344,6 +344,7 @@ def main():
parser.add_argument("--dry-run", "-d", action="store_true", help="Generate metadata but don't publish") parser.add_argument("--dry-run", "-d", action="store_true", help="Generate metadata but don't publish")
parser.add_argument("--title", "-t", help="Override generated title") parser.add_argument("--title", "-t", help="Override generated title")
parser.add_argument("--description", help="Override generated description") parser.add_argument("--description", help="Override generated description")
parser.add_argument("--session-data", "-s", help="Path to session export JSON (from /api/session/export)")
args = parser.parse_args() args = parser.parse_args()
audio_path = Path(args.audio_file).expanduser().resolve() audio_path = Path(args.audio_file).expanduser().resolve()
@@ -358,12 +359,28 @@ def main():
episode_number = get_next_episode_number() episode_number = get_next_episode_number()
print(f"Episode number: {episode_number}") print(f"Episode number: {episode_number}")
# Load session data if provided
session_data = None
if args.session_data:
session_path = Path(args.session_data).expanduser().resolve()
if session_path.exists():
with open(session_path) as f:
session_data = json.load(f)
print(f"Loaded session data: {session_data.get('call_count', 0)} calls")
else:
print(f"Warning: Session data file not found: {session_path}")
# Step 1: Transcribe # Step 1: Transcribe
transcript = transcribe_audio(str(audio_path)) transcript = transcribe_audio(str(audio_path))
# Step 2: Generate metadata # Step 2: Generate metadata
metadata = generate_metadata(transcript, episode_number) metadata = generate_metadata(transcript, episode_number)
# Use session chapters if available (more accurate than LLM-generated)
if session_data and session_data.get("chapters"):
metadata["chapters"] = session_data["chapters"]
print(f" Using {len(metadata['chapters'])} chapters from session data")
# Apply overrides # Apply overrides
if args.title: if args.title:
metadata["title"] = args.title metadata["title"] = args.title
@@ -374,6 +391,13 @@ def main():
chapters_path = audio_path.with_suffix(".chapters.json") chapters_path = audio_path.with_suffix(".chapters.json")
save_chapters(metadata, str(chapters_path)) save_chapters(metadata, str(chapters_path))
# Save transcript alongside episode if session data available
if session_data and session_data.get("transcript"):
transcript_path = audio_path.with_suffix(".transcript.txt")
with open(transcript_path, "w") as f:
f.write(session_data["transcript"])
print(f" Transcript saved to: {transcript_path}")
if args.dry_run: if args.dry_run:
print("\n[DRY RUN] Would publish with:") print("\n[DRY RUN] Would publish with:")
print(f" Title: {metadata['title']}") print(f" Title: {metadata['title']}")

View File

@@ -145,6 +145,7 @@ a:hover {
flex-shrink: 0; flex-shrink: 0;
} }
.btn-hiw { background: var(--accent); }
.btn-spotify { background: #1DB954; } .btn-spotify { background: #1DB954; }
.btn-youtube { background: #FF0000; } .btn-youtube { background: #FF0000; }
.btn-apple { background: #A033FF; } .btn-apple { background: #A033FF; }
@@ -154,7 +155,7 @@ a:hover {
.episodes-section { .episodes-section {
max-width: 900px; max-width: 900px;
margin: 0 auto; margin: 0 auto;
padding: 2rem 1.5rem 8rem; padding: 2rem 1.5rem 3rem;
} }
.episodes-section h2 { .episodes-section h2 {
@@ -249,6 +250,114 @@ a:hover {
padding: 2rem 0; padding: 2rem 0;
} }
/* Testimonials */
.testimonials-section {
max-width: 900px;
margin: 0 auto;
padding: 2rem 1.5rem 3rem;
}
.testimonials-section h2 {
font-size: 1.5rem;
margin-bottom: 1.5rem;
font-weight: 700;
text-align: center;
}
.testimonials-slider {
overflow: hidden;
position: relative;
}
.testimonials-track {
display: flex;
transition: transform 0.5s ease;
will-change: transform;
}
.testimonial-card {
flex: 0 0 100%;
max-width: 100%;
padding: 0 0.5rem;
box-sizing: border-box;
}
.testimonial-inner {
background: var(--bg-light);
border-radius: var(--radius);
padding: 1.5rem;
border-left: 3px solid var(--accent);
overflow: hidden;
}
.testimonial-stars {
color: var(--accent);
font-size: 1.2rem;
letter-spacing: 2px;
margin-bottom: 1rem;
}
.testimonial-text {
font-size: 1.05rem;
line-height: 1.7;
color: var(--text);
margin-bottom: 1.25rem;
font-style: italic;
word-wrap: break-word;
overflow-wrap: break-word;
white-space: normal;
}
.testimonial-author {
display: flex;
align-items: center;
gap: 0.75rem;
}
.testimonial-name {
font-weight: 700;
font-size: 0.95rem;
color: var(--text);
}
.testimonial-location {
font-size: 0.85rem;
color: var(--text-muted);
}
.testimonial-location::before {
content: "\2014 ";
}
.testimonials-dots {
display: flex;
justify-content: center;
gap: 0.5rem;
margin-top: 1.5rem;
}
.testimonial-dot {
width: 8px;
height: 8px;
border-radius: 50%;
background: var(--text-muted);
opacity: 0.4;
border: none;
cursor: pointer;
padding: 0;
transition: opacity 0.3s, background 0.3s, transform 0.3s;
}
.testimonial-dot:hover {
opacity: 0.7;
}
.testimonial-dot.active {
background: var(--accent);
opacity: 1;
transform: scale(1.3);
}
/* Sticky Player */ /* Sticky Player */
.sticky-player { .sticky-player {
position: fixed; position: fixed;
@@ -368,6 +477,38 @@ a:hover {
color: var(--text); color: var(--text);
} }
.footer-projects {
margin: 1.25rem 0;
padding: 1rem 0;
border-top: 1px solid #2a2015;
border-bottom: 1px solid #2a2015;
}
.footer-projects-label {
display: block;
font-size: 0.7rem;
text-transform: uppercase;
letter-spacing: 0.15em;
color: var(--text-muted);
margin-bottom: 0.5rem;
}
.footer-projects-links {
display: flex;
justify-content: center;
gap: 1.5rem;
}
.footer-projects-links a {
color: var(--text-muted);
font-size: 0.85rem;
transition: color 0.2s;
}
.footer-projects-links a:hover {
color: var(--accent);
}
.footer-contact { .footer-contact {
margin-bottom: 0.75rem; margin-bottom: 0.75rem;
} }
@@ -380,6 +521,292 @@ a:hover {
color: var(--accent-hover); color: var(--accent-hover);
} }
/* Page Nav */
.page-nav {
max-width: 900px;
margin: 0 auto;
padding: 1.25rem 1.5rem;
}
.nav-home {
font-weight: 700;
font-size: 1rem;
color: var(--text);
}
.nav-home:hover {
color: var(--accent);
}
/* Page Header */
.page-header {
max-width: 900px;
margin: 0 auto;
padding: 2rem 1.5rem 1rem;
text-align: center;
}
.page-header h1 {
font-size: 2.5rem;
font-weight: 800;
margin-bottom: 0.75rem;
}
.page-subtitle {
font-size: 1.15rem;
color: var(--text-muted);
max-width: 550px;
margin: 0 auto;
}
/* How It Works */
.hiw-section {
max-width: 900px;
margin: 0 auto;
padding: 2rem 1.5rem;
}
.hiw-section h2 {
font-size: 1.5rem;
font-weight: 700;
margin-bottom: 1.5rem;
text-align: center;
}
/* Diagram */
.hiw-hero-card {
background: var(--bg-light);
border-radius: var(--radius);
padding: 2rem;
}
.hiw-diagram {
display: flex;
flex-direction: column;
align-items: center;
gap: 0.25rem;
}
.diagram-row {
display: flex;
justify-content: center;
gap: 1rem;
width: 100%;
}
.diagram-row-split {
flex-wrap: wrap;
}
.diagram-box {
background: var(--bg);
border: 1px solid #3a3020;
border-radius: var(--radius-sm);
padding: 1rem 1.25rem;
display: flex;
flex-direction: column;
align-items: center;
gap: 0.5rem;
min-width: 100px;
text-align: center;
font-size: 0.85rem;
font-weight: 600;
}
.diagram-box.diagram-accent {
border-color: var(--accent);
box-shadow: 0 0 12px rgba(232, 121, 29, 0.15);
}
.diagram-icon {
width: 28px;
height: 28px;
color: var(--accent);
}
.diagram-icon svg {
width: 100%;
height: 100%;
}
.diagram-arrow {
font-size: 1.5rem;
color: var(--text-muted);
line-height: 1;
}
/* Steps */
.hiw-steps {
display: flex;
flex-direction: column;
gap: 2rem;
}
.hiw-step {
display: flex;
gap: 1.25rem;
align-items: flex-start;
}
.hiw-step-number {
width: 40px;
height: 40px;
border-radius: 50%;
background: var(--accent);
color: #fff;
display: flex;
align-items: center;
justify-content: center;
font-weight: 800;
font-size: 1.1rem;
flex-shrink: 0;
}
.hiw-step-content {
flex: 1;
min-width: 0;
}
.hiw-step-content h3 {
font-size: 1.15rem;
font-weight: 700;
margin-bottom: 0.5rem;
}
.hiw-step-content p {
color: var(--text-muted);
font-size: 0.95rem;
line-height: 1.7;
}
.hiw-detail-grid {
display: grid;
grid-template-columns: repeat(2, 1fr);
gap: 0.75rem;
margin-top: 1rem;
}
.hiw-detail {
background: var(--bg-light);
border-radius: var(--radius-sm);
padding: 0.75rem 1rem;
display: flex;
flex-direction: column;
gap: 0.2rem;
}
.hiw-detail-label {
font-size: 0.75rem;
color: var(--text-muted);
text-transform: uppercase;
letter-spacing: 0.05em;
}
.hiw-detail-value {
font-size: 1.1rem;
font-weight: 700;
color: var(--accent);
}
.hiw-split-stat {
display: flex;
gap: 1.5rem;
margin-top: 1rem;
}
.hiw-stat {
display: flex;
flex-direction: column;
gap: 0.15rem;
}
.hiw-stat-number {
font-size: 1.5rem;
font-weight: 800;
color: var(--accent);
}
.hiw-stat-label {
font-size: 0.8rem;
color: var(--text-muted);
}
/* Features */
.hiw-features {
display: grid;
grid-template-columns: 1fr;
gap: 1.5rem;
}
.hiw-feature {
background: var(--bg-light);
border-radius: var(--radius);
padding: 1.5rem;
}
.hiw-feature-icon {
width: 32px;
height: 32px;
color: var(--accent);
margin-bottom: 0.75rem;
}
.hiw-feature-icon svg {
width: 100%;
height: 100%;
}
.hiw-feature h3 {
font-size: 1.05rem;
font-weight: 700;
margin-bottom: 0.5rem;
}
.hiw-feature p {
font-size: 0.9rem;
color: var(--text-muted);
line-height: 1.6;
}
/* CTA */
.hiw-cta {
text-align: center;
padding: 3rem 1.5rem;
}
.hiw-cta p {
font-size: 1.25rem;
font-weight: 700;
margin-bottom: 1rem;
}
.hiw-cta-btn {
display: inline-block;
background: var(--accent);
color: #fff;
padding: 0.75rem 2rem;
border-radius: 50px;
font-weight: 700;
font-size: 1rem;
transition: background 0.2s, transform 0.2s;
}
.hiw-cta-btn:hover {
background: var(--accent-hover);
color: #fff;
transform: translateY(-1px);
}
.hiw-cta-phone {
margin-top: 1rem;
color: var(--text-muted);
font-size: 0.95rem;
}
.hiw-cta-phone strong {
color: var(--accent);
}
/* Desktop */ /* Desktop */
@media (min-width: 768px) { @media (min-width: 768px) {
.hero { .hero {
@@ -406,6 +833,22 @@ a:hover {
} }
.episodes-section { .episodes-section {
padding: 2rem 2rem 8rem; padding: 2rem 2rem 3rem;
}
.hiw-section {
padding: 2.5rem 2rem;
}
.hiw-features {
grid-template-columns: repeat(3, 1fr);
}
.hiw-detail-grid {
grid-template-columns: repeat(4, 1fr);
}
.diagram-row-split {
flex-wrap: nowrap;
} }
} }

245
website/how-it-works.html Normal file
View File

@@ -0,0 +1,245 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>How It Works — Luke at the Roost</title>
<meta name="description" content="How Luke at the Roost works: AI-generated callers with unique personalities, real phone calls, voice synthesis, and a live control room — all built from scratch.">
<link rel="canonical" href="https://lukeattheroost.com/how-it-works">
<meta property="og:title" content="How It Works — Luke at the Roost">
<meta property="og:description" content="The tech behind a one-of-a-kind AI radio show. Real callers, AI callers, voice synthesis, and a live control room.">
<meta property="og:image" content="https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png?v=3">
<meta property="og:url" content="https://lukeattheroost.com/how-it-works">
<meta property="og:type" content="website">
<link rel="icon" type="image/svg+xml" href="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 64 64'%3E%3Cpath d='M32 4c-2 0-4 2-4 5 0 1 .3 2 .8 3C26 13 24 16 24 20c0 2 .5 4 1.5 5.5L22 28c-2 1-4 3-5 6l-3 10c-.5 2 .5 3 2 3h4l1-4 2 4h6l-1-6 3 6h6l-1-6 3 6h4c1.5 0 2.5-1 2-3l-3-10c-1-3-3-5-5-6l-3.5-2.5C35.5 24 36 22 36 20c0-4-2-7-4.8-8 .5-1 .8-2 .8-3 0-3-2-5-4-5z' fill='%23e8791d'/%3E%3Ccircle cx='30' cy='17' r='1.5' fill='%231a1209'/%3E%3Cpath d='M36 15c1-1 3-1 4 0s1 3 0 4' fill='none' stroke='%23cc2222' stroke-width='2' stroke-linecap='round'/%3E%3Cpath d='M28 22c2 1 4 1 6 0' fill='none' stroke='%23e8791d' stroke-width='1.5' stroke-linecap='round'/%3E%3C/svg%3E">
<link rel="stylesheet" href="css/style.css">
</head>
<body>
<!-- Nav -->
<nav class="page-nav">
<a href="/" class="nav-home">Luke at the Roost</a>
</nav>
<!-- Page Header -->
<section class="page-header">
<h1>How It Works</h1>
<p class="page-subtitle">Every caller on the show is a one-of-a-kind character — generated in real time by a custom-built AI system. Here's a peek behind the curtain.</p>
</section>
<!-- Overview -->
<section class="hiw-section">
<div class="hiw-card hiw-hero-card">
<div class="hiw-diagram">
<div class="diagram-row">
<div class="diagram-box diagram-accent">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"/><path d="M19 10v2a7 7 0 0 1-14 0v-2"/><line x1="12" y1="19" x2="12" y2="23"/><line x1="8" y1="23" x2="16" y2="23"/></svg>
</div>
<span>Luke (Host)</span>
</div>
</div>
<div class="diagram-arrow">&#8595;</div>
<div class="diagram-row">
<div class="diagram-box">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><rect x="2" y="3" width="20" height="14" rx="2"/><line x1="8" y1="21" x2="16" y2="21"/><line x1="12" y1="17" x2="12" y2="21"/></svg>
</div>
<span>Control Room</span>
</div>
</div>
<div class="diagram-arrow">&#8595;</div>
<div class="diagram-row diagram-row-split">
<div class="diagram-box">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z"/></svg>
</div>
<span>AI Brain</span>
</div>
<div class="diagram-box">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"/><path d="M19 10v2a7 7 0 0 1-14 0v-2"/></svg>
</div>
<span>Voice Engine</span>
</div>
<div class="diagram-box">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><circle cx="12" cy="12" r="10"/><line x1="2" y1="12" x2="22" y2="12"/><path d="M12 2a15.3 15.3 0 0 1 4 10 15.3 15.3 0 0 1-4 10 15.3 15.3 0 0 1-4-10 15.3 15.3 0 0 1 4-10z"/></svg>
</div>
<span>Live News</span>
</div>
</div>
<div class="diagram-arrow">&#8595;</div>
<div class="diagram-row diagram-row-split">
<div class="diagram-box diagram-accent">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M17 21v-2a4 4 0 0 0-4-4H5a4 4 0 0 0-4 4v2"/><circle cx="9" cy="7" r="4"/><path d="M23 21v-2a4 4 0 0 0-3-3.87"/><path d="M16 3.13a4 4 0 0 1 0 7.75"/></svg>
</div>
<span>AI Callers</span>
</div>
<div class="diagram-box diagram-accent">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M22 16.92v3a2 2 0 0 1-2.18 2 19.79 19.79 0 0 1-8.63-3.07 19.5 19.5 0 0 1-6-6 19.79 19.79 0 0 1-3.07-8.67A2 2 0 0 1 4.11 2h3a2 2 0 0 1 2 1.72 12.84 12.84 0 0 0 .7 2.81 2 2 0 0 1-.45 2.11L8.09 9.91a16 16 0 0 0 6 6l1.27-1.27a2 2 0 0 1 2.11-.45 12.84 12.84 0 0 0 2.81.7A2 2 0 0 1 22 16.92z"/></svg>
</div>
<span>Real Callers</span>
</div>
</div>
</div>
</div>
</section>
<!-- Steps -->
<section class="hiw-section">
<h2>The Anatomy of an AI Caller</h2>
<div class="hiw-steps">
<div class="hiw-step">
<div class="hiw-step-number">1</div>
<div class="hiw-step-content">
<h3>A Person Is Born</h3>
<p>Every caller starts as a blank slate. The system generates a complete identity: name, age, job, hometown, and personality. Each caller gets a unique speaking style — some ramble, some are blunt, some deflect with humor. They have relationships, vehicles, opinions, memories, and reasons for being up this late.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Unique Names</span>
<span class="hiw-detail-value">48 names</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Personality Layers</span>
<span class="hiw-detail-value">20+</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Towns with Real Knowledge</span>
<span class="hiw-detail-value">32</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Unique Voices</span>
<span class="hiw-detail-value">25</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">2</div>
<div class="hiw-step-content">
<h3>They Know Their World</h3>
<p>Callers know real facts about where they live — the restaurants, the highways, the local gossip. When a caller says they're from Lordsburg, they actually know about the Hidalgo Hotel and the drive to Deming. The system pulls in real-time news so callers can reference things that actually happened today.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">3</div>
<div class="hiw-step-content">
<h3>They Have a Reason to Call</h3>
<p>Some callers have a problem — a fight with a neighbor, a situation at work, something weighing on them at 2 AM. Others call to geek out about Severance, argue about poker strategy, or share something they read about quantum physics. Every caller has a purpose, not just a script.</p>
<div class="hiw-split-stat">
<div class="hiw-stat">
<span class="hiw-stat-number">70%</span>
<span class="hiw-stat-label">Need advice</span>
</div>
<div class="hiw-stat">
<span class="hiw-stat-number">30%</span>
<span class="hiw-stat-label">Want to talk about something</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">4</div>
<div class="hiw-step-content">
<h3>The Conversation Is Real</h3>
<p>Luke talks to each caller using push-to-talk, just like a real radio show. His voice is transcribed in real time, sent to an AI that responds in character, and then converted to speech using a voice engine — all in a few seconds. The AI doesn't just answer questions; it reacts, gets emotional, goes on tangents, and remembers what was said earlier in the show.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">5</div>
<div class="hiw-step-content">
<h3>Real Callers Call In Too</h3>
<p>When you dial 208-439-LUKE, your call goes into a live queue. Luke sees you waiting and can take your call right from the control room. Your voice streams in real time — no pre-recording, no delay. You're live on the show, talking to Luke, and the AI callers might even react to what you said.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">6</div>
<div class="hiw-step-content">
<h3>The Control Room</h3>
<p>The entire show runs through a custom-built control panel. Luke manages callers, plays music and sound effects, runs ads, monitors the call queue, and controls everything from one screen. Audio is routed across multiple channels simultaneously — caller voices, music, sound effects, and live phone audio all on separate tracks for professional mixing.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Audio Channels</span>
<span class="hiw-detail-value">5 independent</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Caller Slots</span>
<span class="hiw-detail-value">10 per session</span>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- What makes it different -->
<section class="hiw-section">
<h2>What Makes This Different</h2>
<div class="hiw-features">
<div class="hiw-feature">
<div class="hiw-feature-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M20.84 4.61a5.5 5.5 0 0 0-7.78 0L12 5.67l-1.06-1.06a5.5 5.5 0 0 0-7.78 7.78l1.06 1.06L12 21.23l7.78-7.78 1.06-1.06a5.5 5.5 0 0 0 0-7.78z"/></svg>
</div>
<h3>Not Scripted</h3>
<p>Every conversation is improvised. Luke doesn't know what the caller is going to say. The AI doesn't follow a script. It's a real conversation between a human and an AI character who has a life, opinions, and something on their mind.</p>
</div>
<div class="hiw-feature">
<div class="hiw-feature-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z"/></svg>
</div>
<h3>Built From Scratch</h3>
<p>This isn't an app with a plugin. Every piece — the caller generator, the voice engine, the control room, the phone system, the audio routing — was built specifically for this show. No templates, no shortcuts.</p>
</div>
<div class="hiw-feature">
<div class="hiw-feature-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><circle cx="12" cy="12" r="10"/><polyline points="12 6 12 12 16 14"/></svg>
</div>
<h3>Real Time</h3>
<p>Everything happens live. Caller generation, voice synthesis, news lookups, phone routing — all in real time during the show. There's no post-production trickery on the caller side. What you hear is what happened.</p>
</div>
</div>
</section>
<!-- CTA -->
<section class="hiw-section hiw-cta">
<p>Want to hear it for yourself?</p>
<a href="/" class="hiw-cta-btn">Listen to Episodes</a>
<div class="hiw-cta-phone">
Or call in live: <strong>208-439-LUKE</strong>
</div>
</section>
<!-- Footer -->
<footer class="footer">
<div class="footer-links">
<a href="/">Home</a>
<a href="https://open.spotify.com/show/0ZrpMigG1fo0CCN7F4YmuF?si=f990713adce84ba4" target="_blank" rel="noopener">Spotify</a>
<a href="https://www.youtube.com/watch?v=xryGLifMBTY&list=PLGq4uZyNV1yYH_rcitTTPVysPbC6-7pe-" target="_blank" rel="noopener">YouTube</a>
<a href="https://podcast.macneilmediagroup.com/@LukeAtTheRoost/feed.xml" target="_blank" rel="noopener">RSS</a>
</div>
<div class="footer-projects">
<span class="footer-projects-label">More from Luke</span>
<div class="footer-projects-links">
<a href="https://macneilmediagroup.com" target="_blank" rel="noopener">MacNeil Media Group</a>
<a href="https://prints.macneilmediagroup.com" target="_blank" rel="noopener">Photography Prints</a>
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div>
</div>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost</p>
</footer>
</body>
</html>

BIN
website/images/cover.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 MiB

View File

@@ -11,13 +11,13 @@
<!-- OG / Social --> <!-- OG / Social -->
<meta property="og:title" content="Luke at the Roost — Life advice for biologically questionable organisms"> <meta property="og:title" content="Luke at the Roost — Life advice for biologically questionable organisms">
<meta property="og:description" content="The call-in talk show where Luke gives life advice to biologically questionable organisms — from a desert hermit's RV. Call in: 208-439-LUKE."> <meta property="og:description" content="The call-in talk show where Luke gives life advice to biologically questionable organisms — from a desert hermit's RV. Call in: 208-439-LUKE.">
<meta property="og:image" content="https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png?v=2"> <meta property="og:image" content="https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png?v=3">
<meta property="og:url" content="https://lukeattheroost.com"> <meta property="og:url" content="https://lukeattheroost.com">
<meta property="og:type" content="website"> <meta property="og:type" content="website">
<meta name="twitter:card" content="summary_large_image"> <meta name="twitter:card" content="summary_large_image">
<meta name="twitter:title" content="Luke at the Roost"> <meta name="twitter:title" content="Luke at the Roost">
<meta name="twitter:description" content="The call-in talk show where Luke gives life advice to biologically questionable organisms. Call in: 208-439-LUKE"> <meta name="twitter:description" content="The call-in talk show where Luke gives life advice to biologically questionable organisms. Call in: 208-439-LUKE">
<meta name="twitter:image" content="https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png?v=2"> <meta name="twitter:image" content="https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png?v=3">
<!-- Favicon --> <!-- Favicon -->
<link rel="icon" type="image/svg+xml" href="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 64 64'%3E%3Cpath d='M32 4c-2 0-4 2-4 5 0 1 .3 2 .8 3C26 13 24 16 24 20c0 2 .5 4 1.5 5.5L22 28c-2 1-4 3-5 6l-3 10c-.5 2 .5 3 2 3h4l1-4 2 4h6l-1-6 3 6h6l-1-6 3 6h4c1.5 0 2.5-1 2-3l-3-10c-1-3-3-5-5-6l-3.5-2.5C35.5 24 36 22 36 20c0-4-2-7-4.8-8 .5-1 .8-2 .8-3 0-3-2-5-4-5z' fill='%23e8791d'/%3E%3Ccircle cx='30' cy='17' r='1.5' fill='%231a1209'/%3E%3Cpath d='M36 15c1-1 3-1 4 0s1 3 0 4' fill='none' stroke='%23cc2222' stroke-width='2' stroke-linecap='round'/%3E%3Cpath d='M28 22c2 1 4 1 6 0' fill='none' stroke='%23e8791d' stroke-width='1.5' stroke-linecap='round'/%3E%3C/svg%3E"> <link rel="icon" type="image/svg+xml" href="data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 64 64'%3E%3Cpath d='M32 4c-2 0-4 2-4 5 0 1 .3 2 .8 3C26 13 24 16 24 20c0 2 .5 4 1.5 5.5L22 28c-2 1-4 3-5 6l-3 10c-.5 2 .5 3 2 3h4l1-4 2 4h6l-1-6 3 6h6l-1-6 3 6h4c1.5 0 2.5-1 2-3l-3-10c-1-3-3-5-5-6l-3.5-2.5C35.5 24 36 22 36 20c0-4-2-7-4.8-8 .5-1 .8-2 .8-3 0-3-2-5-4-5z' fill='%23e8791d'/%3E%3Ccircle cx='30' cy='17' r='1.5' fill='%231a1209'/%3E%3Cpath d='M36 15c1-1 3-1 4 0s1 3 0 4' fill='none' stroke='%23cc2222' stroke-width='2' stroke-linecap='round'/%3E%3Cpath d='M28 22c2 1 4 1 6 0' fill='none' stroke='%23e8791d' stroke-width='1.5' stroke-linecap='round'/%3E%3C/svg%3E">
@@ -33,7 +33,7 @@
"name": "Luke at the Roost", "name": "Luke at the Roost",
"description": "The call-in talk show where Luke gives life advice to biologically questionable organisms. Broadcast from a desert hermit's RV, featuring a mix of real callers and AI-generated callers.", "description": "The call-in talk show where Luke gives life advice to biologically questionable organisms. Broadcast from a desert hermit's RV, featuring a mix of real callers and AI-generated callers.",
"url": "https://lukeattheroost.com", "url": "https://lukeattheroost.com",
"image": "https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png", "image": "https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png?v=3",
"author": { "author": {
"@type": "Person", "@type": "Person",
"name": "Luke MacNeil" "name": "Luke MacNeil"
@@ -65,7 +65,7 @@
<div class="hero-inner"> <div class="hero-inner">
<img <img
class="cover-art" class="cover-art"
src="https://podcast.macneilmediagroup.com/media/podcasts/LukeAtTheRoost/cover_feed.png?v=2" src="images/cover.png"
alt="Luke at the Roost cover art" alt="Luke at the Roost cover art"
> >
<div class="hero-info"> <div class="hero-info">
@@ -77,6 +77,10 @@
<span class="phone-digits">(208-439-5853)</span> <span class="phone-digits">(208-439-5853)</span>
</div> </div>
<div class="subscribe-row"> <div class="subscribe-row">
<a href="/how-it-works" class="subscribe-btn btn-hiw">
<svg viewBox="0 0 24 24" fill="currentColor"><path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm1 17h-2v-2h2v2zm2.07-7.75l-.9.92C13.45 12.9 13 13.5 13 15h-2v-.5c0-1.1.45-2.1 1.17-2.83l1.24-1.26c.37-.36.59-.86.59-1.41 0-1.1-.9-2-2-2s-2 .9-2 2H8c0-2.21 1.79-4 4-4s4 1.79 4 4c0 .88-.36 1.68-.93 2.25z"/></svg>
How It Works
</a>
<a href="https://open.spotify.com/show/0ZrpMigG1fo0CCN7F4YmuF?si=f990713adce84ba4" target="_blank" rel="noopener" class="subscribe-btn btn-spotify"> <a href="https://open.spotify.com/show/0ZrpMigG1fo0CCN7F4YmuF?si=f990713adce84ba4" target="_blank" rel="noopener" class="subscribe-btn btn-spotify">
<svg viewBox="0 0 24 24" fill="currentColor"><path d="M12 0C5.4 0 0 5.4 0 12s5.4 12 12 12 12-5.4 12-12S18.66 0 12 0zm5.521 17.34c-.24.359-.66.48-1.021.24-2.82-1.74-6.36-2.101-10.561-1.141-.418.122-.779-.179-.899-.539-.12-.421.18-.78.54-.9 4.56-1.021 8.52-.6 11.64 1.32.42.18.479.659.301 1.02zm1.44-3.3c-.301.42-.841.6-1.262.3-3.239-1.98-8.159-2.58-11.939-1.38-.479.12-1.02-.12-1.14-.6-.12-.48.12-1.021.6-1.141C9.6 9.9 15 10.561 18.72 12.84c.361.181.54.78.241 1.2zm.12-3.36C15.24 8.4 8.82 8.16 5.16 9.301c-.6.179-1.2-.181-1.38-.721-.18-.601.18-1.2.72-1.381 4.26-1.26 11.28-1.02 15.721 1.621.539.3.719 1.02.419 1.56-.299.421-1.02.599-1.559.3z"/></svg> <svg viewBox="0 0 24 24" fill="currentColor"><path d="M12 0C5.4 0 0 5.4 0 12s5.4 12 12 12 12-5.4 12-12S18.66 0 12 0zm5.521 17.34c-.24.359-.66.48-1.021.24-2.82-1.74-6.36-2.101-10.561-1.141-.418.122-.779-.179-.899-.539-.12-.421.18-.78.54-.9 4.56-1.021 8.52-.6 11.64 1.32.42.18.479.659.301 1.02zm1.44-3.3c-.301.42-.841.6-1.262.3-3.239-1.98-8.159-2.58-11.939-1.38-.479.12-1.02-.12-1.14-.6-.12-.48.12-1.021.6-1.141C9.6 9.9 15 10.561 18.72 12.84c.361.181.54.78.241 1.2zm.12-3.36C15.24 8.4 8.82 8.16 5.16 9.301c-.6.179-1.2-.181-1.38-.721-.18-.601.18-1.2.72-1.381 4.26-1.26 11.28-1.02 15.721 1.621.539.3.719 1.02.419 1.56-.299.421-1.02.599-1.559.3z"/></svg>
Spotify Spotify
@@ -98,6 +102,80 @@
</div> </div>
</section> </section>
<!-- Testimonials -->
<section class="testimonials-section">
<h2>What Callers Are Saying</h2>
<div class="testimonials-slider" id="testimonials-slider">
<div class="testimonials-track" id="testimonials-track">
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9733;</div>
<p class="testimonial-text">"I called in at 2 AM about my neighbor's rooster situation and Luke talked me off the ledge. Literally saved my relationship with the entire block. My wife thinks I'm crazy for calling a radio show but hey, it worked."</p>
<div class="testimonial-author">
<span class="testimonial-name">Tony M.</span>
<span class="testimonial-location">Lordsburg, NM</span>
</div>
</div></div>
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9733;</div>
<p class="testimonial-text">"Called to talk about the Severance finale and ended up getting life advice I didn't know I needed. Luke somehow connected Lumon Industries to my actual job and I quit the next week. Best decision I ever made."</p>
<div class="testimonial-author">
<span class="testimonial-name">Carmen R.</span>
<span class="testimonial-location">Deming, NM</span>
</div>
</div></div>
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9733;</div>
<p class="testimonial-text">"I've been listening since episode one. Called in about my truck breaking down outside Animas and Luke spent twenty minutes just talking me through it. Turns out it was the alternator AND my attitude. He was right about both."</p>
<div class="testimonial-author">
<span class="testimonial-name">Dale W.</span>
<span class="testimonial-location">Animas, NM</span>
</div>
</div></div>
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9734;</div>
<p class="testimonial-text">"I called in to ask about astrophotography tips and somehow ended up telling Luke about my divorce. He's got this way of getting you to open up. Still shooting the Milky Way every clear night though. Thanks Luke."</p>
<div class="testimonial-author">
<span class="testimonial-name">Jessie K.</span>
<span class="testimonial-location">Silver City, NM</span>
</div>
</div></div>
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9733;</div>
<p class="testimonial-text">"My buddy dared me to call in and I ended up having the most real conversation I've had in years. We talked about The Wire for like ten minutes and then he hit me with some truth about why I keep ghosting people. This show is something else."</p>
<div class="testimonial-author">
<span class="testimonial-name">Marcus T.</span>
<span class="testimonial-location">Las Cruces, NM</span>
</div>
</div></div>
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9733;</div>
<p class="testimonial-text">"I work night shifts at the mine and this show keeps me sane. Finally called in about a thing with my sister and Luke gave me advice that actually made sense. We're talking again for the first time in three years."</p>
<div class="testimonial-author">
<span class="testimonial-name">Ray D.</span>
<span class="testimonial-location">Tyrone, NM</span>
</div>
</div></div>
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9733;</div>
<p class="testimonial-text">"Called about my poker game falling apart because my best friend cheated. Luke compared it to a Breaking Bad episode and somehow made me see the whole situation differently. We play again every Thursday now."</p>
<div class="testimonial-author">
<span class="testimonial-name">Elena S.</span>
<span class="testimonial-location">Hachita, NM</span>
</div>
</div></div>
<div class="testimonial-card"><div class="testimonial-inner">
<div class="testimonial-stars">&#9733;&#9733;&#9733;&#9733;&#9734;</div>
<p class="testimonial-text">"I was just gonna ask about quantum entanglement because I read this article, but Luke turned it into a metaphor for my long distance relationship and honestly? He wasn't wrong. We're moving in together next month."</p>
<div class="testimonial-author">
<span class="testimonial-name">Priya N.</span>
<span class="testimonial-location">Tucson, AZ</span>
</div>
</div></div>
</div>
</div>
<div class="testimonials-dots" id="testimonials-dots"></div>
</section>
<!-- Episodes --> <!-- Episodes -->
<section class="episodes-section"> <section class="episodes-section">
<h2>Episodes</h2> <h2>Episodes</h2>
@@ -109,10 +187,19 @@
<!-- Footer --> <!-- Footer -->
<footer class="footer"> <footer class="footer">
<div class="footer-links"> <div class="footer-links">
<a href="/how-it-works">How It Works</a>
<a href="https://open.spotify.com/show/0ZrpMigG1fo0CCN7F4YmuF?si=f990713adce84ba4" target="_blank" rel="noopener">Spotify</a> <a href="https://open.spotify.com/show/0ZrpMigG1fo0CCN7F4YmuF?si=f990713adce84ba4" target="_blank" rel="noopener">Spotify</a>
<a href="https://www.youtube.com/watch?v=xryGLifMBTY&list=PLGq4uZyNV1yYH_rcitTTPVysPbC6-7pe-" target="_blank" rel="noopener">YouTube</a> <a href="https://www.youtube.com/watch?v=xryGLifMBTY&list=PLGq4uZyNV1yYH_rcitTTPVysPbC6-7pe-" target="_blank" rel="noopener">YouTube</a>
<a href="https://podcast.macneilmediagroup.com/@LukeAtTheRoost/feed.xml" target="_blank" rel="noopener">RSS</a> <a href="https://podcast.macneilmediagroup.com/@LukeAtTheRoost/feed.xml" target="_blank" rel="noopener">RSS</a>
</div> </div>
<div class="footer-projects">
<span class="footer-projects-label">More from Luke</span>
<div class="footer-projects-links">
<a href="https://macneilmediagroup.com" target="_blank" rel="noopener">MacNeil Media Group</a>
<a href="https://prints.macneilmediagroup.com" target="_blank" rel="noopener">Photography Prints</a>
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div>
</div>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p> <p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost</p> <p>&copy; 2026 Luke at the Roost</p>
</footer> </footer>

View File

@@ -223,5 +223,83 @@ playerProgress.addEventListener('click', (e) => {
} }
}); });
// Testimonials Slider
// One-card-at-a-time horizontal slider: dot navigation, 10s autoplay,
// touch-swipe support, and resize recalculation.
function initTestimonials() {
    const track = document.getElementById('testimonials-track');
    const dotsContainer = document.getElementById('testimonials-dots');
    // Fix: on pages that include this script but not the testimonials markup,
    // track is null and track.querySelectorAll throws, halting every script
    // statement after the unconditional initTestimonials() call. Bail out.
    if (!track || !dotsContainer) return;
    const cards = track.querySelectorAll('.testimonial-card');
    if (!cards.length) return;

    let currentIndex = 0;
    let autoplayTimer = null;
    const maxIndex = () => Math.max(0, cards.length - 1);

    // Rebuild the dot buttons to match the card count; the active dot
    // mirrors currentIndex.
    function buildDots() {
        dotsContainer.innerHTML = '';
        for (let i = 0; i < cards.length; i++) {
            const dot = document.createElement('button');
            dot.className = 'testimonial-dot' + (i === currentIndex ? ' active' : '');
            dot.setAttribute('aria-label', `Testimonial ${i + 1}`);
            dot.addEventListener('click', () => goTo(i));
            dotsContainer.appendChild(dot);
        }
    }

    // Slide the track so the current card is in view and sync dot state.
    // Reads the live card width so it stays correct after a resize.
    function updatePosition() {
        const cardWidth = cards[0].offsetWidth;
        track.style.transform = `translateX(-${currentIndex * cardWidth}px)`;
        dotsContainer.querySelectorAll('.testimonial-dot').forEach((d, i) => {
            d.classList.toggle('active', i === currentIndex);
        });
    }

    // Clamp to [0, maxIndex] and navigate; any explicit navigation also
    // restarts the autoplay clock.
    function goTo(index) {
        currentIndex = Math.max(0, Math.min(index, maxIndex()));
        updatePosition();
        resetAutoplay();
    }

    // Autoplay advance: wraps back to the first card after the last.
    function next() {
        goTo(currentIndex >= maxIndex() ? 0 : currentIndex + 1);
    }

    function resetAutoplay() {
        clearInterval(autoplayTimer);
        autoplayTimer = setInterval(next, 10000);
    }

    // Touch/swipe support: >50px of horizontal travel counts as a swipe.
    // Note swipes clamp at the ends (no wrap), unlike autoplay's next().
    let touchStartX = 0;
    let touchDelta = 0;
    track.addEventListener('touchstart', (e) => {
        touchStartX = e.touches[0].clientX;
        touchDelta = 0;
        clearInterval(autoplayTimer); // pause autoplay while the user drags
    }, { passive: true });
    track.addEventListener('touchmove', (e) => {
        touchDelta = e.touches[0].clientX - touchStartX;
    }, { passive: true });
    track.addEventListener('touchend', () => {
        if (Math.abs(touchDelta) > 50) {
            touchDelta < 0 ? goTo(currentIndex + 1) : goTo(currentIndex - 1);
        }
        resetAutoplay();
    });

    // Card width changes with the viewport, so recompute layout on resize.
    window.addEventListener('resize', () => {
        if (currentIndex > maxIndex()) currentIndex = maxIndex();
        buildDots();
        updatePosition();
    });

    buildDots();
    updatePosition();
    resetAutoplay();
}
// Init // Init
fetchEpisodes(); fetchEpisodes();
initTestimonials();