Compare commits

..

25 Commits

Author SHA1 Message Date
0412f4487f Enhance caller personality for depth and authenticity
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 00:39:49 -07:00
50e3d3af7d Include news and research context in caller prompts
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 00:35:55 -07:00
a06d0a22e1 Wire up headline fetch and background research triggers
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 00:22:49 -07:00
e46337a05a Add session news/research fields and helper functions
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 00:21:25 -07:00
e28579f909 Add NewsService for current events awareness
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 00:18:40 -07:00
437980dfd4 Update tests for SignalWire phone caller format
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:47:50 -07:00
ecc30c44e1 Update frontend for phone caller display
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:46:48 -07:00
9361a3c2e2 Remove browser call-in page 2026-02-05 17:46:37 -07:00
9016f9734f Add SignalWire endpoints, update queue/hangup for phone callers
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:45:08 -07:00
051790136e Update CallerService for SignalWire protocol
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:41:27 -07:00
c22818bfec Add SignalWire configuration
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:38:41 -07:00
a1c94a3682 Fix unnatural response cutoffs
- Replace aggressive sentence-count limiting with ensure_complete_thought()
  which only trims if the LLM was actually cut off mid-sentence
- Softer prompt guidance for natural brevity instead of rigid sentence count
- max_tokens at 100 as natural length cap

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:18:22 -07:00
9d4b8a0d22 Replace token-based truncation with sentence-count limiting
- max_tokens back to 150 so LLM can finish thoughts
- New limit_sentences() keeps only first 2 complete sentences
- Never cuts mid-sentence — always ends at punctuation
- Applied to both chat and auto-respond paths

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:15:04 -07:00
9c5f7c5cfe Add debug logging and safety for piggybacked recording
- Log chunk count and peak audio level on recording stop
- Add null check on _recorded_audio in callback
- Small delay after stopping piggybacked recording for callback to finish

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:11:51 -07:00
6a56967540 Enforce shorter AI responses and prevent cut-off sentences
- Reduce max_tokens from 100 to 75 for shorter output
- Add truncate_to_complete_sentence() to trim at last punctuation
- Applied to both chat and auto-respond paths

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:07:41 -07:00
0e65fa5084 Force shorter AI responses — max 1-2 sentences
- Much stronger prompt language: "no more than 2 sentences EVER"
- Added "DO NOT ramble" instruction
- Reduced max_tokens back to 100 as hard limit

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:05:51 -07:00
3192735615 Fix AI responses being cut off
- Increase max_tokens from 100 to 150 to avoid mid-sentence truncation
- Tighten prompt to 1-2 short sentences with emphasis on completing them

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:04:12 -07:00
d583b48af0 Fix choppy/distorted audio to live caller
- Mute host mic forwarding while TTS is streaming to prevent interleaving
  both audio sources into the same playback buffer
- Replace nearest-neighbor downsampling with box-filter averaging on both
  server (host mic) and browser (caller mic) for anti-aliased resampling

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 17:01:33 -07:00
d4e25ceb88 Stream TTS audio to caller in real-time chunks
TTS audio was sent as a single huge WebSocket frame that overflowed the
browser's 3s ring buffer. Now streams in 60ms chunks at real-time rate.
Also increased browser ring buffer from 3s to 10s as safety net.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 16:56:22 -07:00
97d37f3381 Send AI TTS audio to live caller during auto-respond
The auto-respond function played AI TTS to the local Loopback channel
but didn't send it over WebSocket to the live caller in the browser.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 16:53:41 -07:00
eaedc4214b Reduce live caller latency and improve reliability
- Replace per-callback async task spawning with persistent queue-based sender
- Buffer host mic to 60ms chunks (was 21ms) to reduce WebSocket frame rate
- Reduce server ring buffer prebuffer from 150ms to 80ms
- Reduce browser playback jitter buffer from 150ms to 100ms

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 16:47:17 -07:00
af8606b5b7 Fix recording conflict when host stream is active
When a live caller is on air, the host stream already has an InputStream
open. Opening a second one for push-to-talk recording causes a conflict.
Now recording piggybacks on the host stream callback instead.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 16:42:07 -07:00
4d97ea9099 Replace queue with ring buffer jitter absorption for live caller audio
- Server: 150ms pre-buffer ring buffer eliminates gaps from timing mismatches
- Browser playback: 150ms jitter buffer (up from 80ms) for network jitter
- Capture chunks: 960 samples/60ms (better network efficiency)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 16:37:50 -07:00
7aed4d9c34 Fix live caller audio latency and choppiness
- Reduce capture chunk from 4096 to 640 samples (256ms → 40ms)
- Replace BufferSource scheduling with AudioWorklet playback ring buffer
- Add 80ms jitter buffer with linear interpolation upsampling
- Reduce host mic and live caller stream blocksizes from 4096/2048 to 1024
- Replace librosa.resample with numpy interpolation in send_audio_to_caller

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 16:32:27 -07:00
ab36ad8d5b Fix choppy audio and hanging when taking live callers
- Use persistent callback-based output stream instead of opening/closing per chunk
- Replace librosa.resample with simple decimation in real-time audio callbacks
- Move host stream initialization to background thread to avoid blocking
- Change live caller channel default to 9

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-05 16:24:27 -07:00
10 changed files with 1071 additions and 682 deletions

View File

@@ -15,6 +15,12 @@ class Settings(BaseSettings):
openrouter_api_key: str = os.getenv("OPENROUTER_API_KEY", "") openrouter_api_key: str = os.getenv("OPENROUTER_API_KEY", "")
inworld_api_key: str = os.getenv("INWORLD_API_KEY", "") inworld_api_key: str = os.getenv("INWORLD_API_KEY", "")
# SignalWire
signalwire_project_id: str = os.getenv("SIGNALWIRE_PROJECT_ID", "")
signalwire_space: str = os.getenv("SIGNALWIRE_SPACE", "")
signalwire_token: str = os.getenv("SIGNALWIRE_TOKEN", "")
signalwire_phone: str = os.getenv("SIGNALWIRE_PHONE", "")
# LLM Settings # LLM Settings
llm_provider: str = "openrouter" # "openrouter" or "ollama" llm_provider: str = "openrouter" # "openrouter" or "ollama"
openrouter_model: str = "anthropic/claude-3-haiku" openrouter_model: str = "anthropic/claude-3-haiku"

File diff suppressed because it is too large Load Diff

View File

@@ -24,7 +24,7 @@ class AudioService:
self.output_device: Optional[int] = None # Single output device (multi-channel) self.output_device: Optional[int] = None # Single output device (multi-channel)
self.caller_channel: int = 1 # Channel for caller TTS self.caller_channel: int = 1 # Channel for caller TTS
self.live_caller_channel: int = 4 # Channel for live caller audio self.live_caller_channel: int = 9 # Channel for live caller audio
self.music_channel: int = 2 # Channel for music self.music_channel: int = 2 # Channel for music
self.sfx_channel: int = 3 # Channel for SFX self.sfx_channel: int = 3 # Channel for SFX
self.phone_filter: bool = False # Phone filter on caller voices self.phone_filter: bool = False # Phone filter on caller voices
@@ -52,6 +52,11 @@ class AudioService:
# Host mic streaming state # Host mic streaming state
self._host_stream: Optional[sd.InputStream] = None self._host_stream: Optional[sd.InputStream] = None
self._host_send_callback: Optional[Callable] = None self._host_send_callback: Optional[Callable] = None
self._host_device_sr: int = 48000
# Live caller routing state
self._live_caller_stream: Optional[sd.OutputStream] = None
self._live_caller_write: Optional[Callable] = None
# Sample rates # Sample rates
self.input_sample_rate = 16000 # For Whisper self.input_sample_rate = 16000 # For Whisper
@@ -169,6 +174,13 @@ class AudioService:
self._recording = True self._recording = True
self._recorded_audio = [] self._recorded_audio = []
if self._host_stream is not None:
# Host stream already capturing — piggyback on it
self._record_device_sr = self._host_device_sr
print(f"Recording started (piggybacking on host stream @ {self._host_device_sr}Hz)")
return True
self._record_thread = threading.Thread(target=self._record_worker) self._record_thread = threading.Thread(target=self._record_worker)
self._record_thread.start() self._record_thread.start()
print(f"Recording started from device {self.input_device}") print(f"Recording started from device {self.input_device}")
@@ -184,14 +196,19 @@ class AudioService:
self._recording = False self._recording = False
if self._record_thread: if self._record_thread:
self._record_thread.join(timeout=2.0) self._record_thread.join(timeout=2.0)
self._record_thread = None
else:
# Piggybacking on host stream — give callback a moment to finish
time.sleep(0.05)
if not self._recorded_audio: if not self._recorded_audio:
print(f"Recording stopped: NO audio chunks captured (piggyback={self._host_stream is not None})")
return b"" return b""
# Combine all chunks # Combine all chunks
audio = np.concatenate(self._recorded_audio) audio = np.concatenate(self._recorded_audio)
device_sr = getattr(self, '_record_device_sr', 48000) device_sr = getattr(self, '_record_device_sr', 48000)
print(f"Recording stopped: {len(audio)} samples @ {device_sr}Hz ({len(audio)/device_sr:.2f}s)") print(f"Recording stopped: {len(audio)} samples @ {device_sr}Hz ({len(audio)/device_sr:.2f}s), chunks={len(self._recorded_audio)}, peak={np.abs(audio).max():.4f}")
# Resample to 16kHz for Whisper # Resample to 16kHz for Whisper
if device_sr != 16000: if device_sr != 16000:
@@ -320,38 +337,111 @@ class AudioService:
"""Stop any playing caller audio""" """Stop any playing caller audio"""
self._caller_stop_event.set() self._caller_stop_event.set()
def route_real_caller_audio(self, pcm_data: bytes, sample_rate: int): def _start_live_caller_stream(self):
"""Route real caller PCM audio to the configured live caller Loopback channel""" """Start persistent output stream with ring buffer jitter absorption"""
import librosa if self._live_caller_stream is not None:
return
if self.output_device is None: if self.output_device is None:
return return
device_info = sd.query_devices(self.output_device)
num_channels = device_info['max_output_channels']
device_sr = int(device_info['default_samplerate'])
channel_idx = min(self.live_caller_channel, num_channels) - 1
self._live_caller_device_sr = device_sr
self._live_caller_num_channels = num_channels
self._live_caller_channel_idx = channel_idx
# Ring buffer: 3 seconds capacity, 80ms pre-buffer before playback starts
ring_size = int(device_sr * 3)
ring = np.zeros(ring_size, dtype=np.float32)
prebuffer_samples = int(device_sr * 0.08)
# Mutable state shared between writer (main thread) and reader (audio callback)
# CPython GIL makes individual int reads/writes atomic
state = {"write_pos": 0, "read_pos": 0, "avail": 0, "started": False}
def write_audio(data):
n = len(data)
wp = state["write_pos"]
if wp + n <= ring_size:
ring[wp:wp + n] = data
else:
first = ring_size - wp
ring[wp:] = data[:first]
ring[:n - first] = data[first:]
state["write_pos"] = (wp + n) % ring_size
state["avail"] += n
def callback(outdata, frames, time_info, status):
outdata.fill(0)
avail = state["avail"]
if not state["started"]:
if avail >= prebuffer_samples:
state["started"] = True
else:
return
if avail < frames:
# Underrun — stop and re-buffer
state["started"] = False
return
rp = state["read_pos"]
if rp + frames <= ring_size:
outdata[:frames, channel_idx] = ring[rp:rp + frames]
else:
first = ring_size - rp
outdata[:first, channel_idx] = ring[rp:]
outdata[first:frames, channel_idx] = ring[:frames - first]
state["read_pos"] = (rp + frames) % ring_size
state["avail"] -= frames
self._live_caller_write = write_audio
self._live_caller_stream = sd.OutputStream(
device=self.output_device,
samplerate=device_sr,
channels=num_channels,
dtype=np.float32,
callback=callback,
blocksize=1024,
)
self._live_caller_stream.start()
print(f"[Audio] Live caller stream started on ch {self.live_caller_channel} @ {device_sr}Hz (prebuffer {prebuffer_samples} samples)")
def _stop_live_caller_stream(self):
"""Stop persistent live caller output stream"""
if self._live_caller_stream:
self._live_caller_stream.stop()
self._live_caller_stream.close()
self._live_caller_stream = None
self._live_caller_write = None
print("[Audio] Live caller stream stopped")
def route_real_caller_audio(self, pcm_data: bytes, sample_rate: int):
"""Route real caller PCM audio to the configured live caller Loopback channel"""
if self.output_device is None:
return
if self._live_caller_stream is None:
self._start_live_caller_stream()
try: try:
# Convert bytes to float32
audio = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0 audio = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0
device_info = sd.query_devices(self.output_device) device_sr = self._live_caller_device_sr
num_channels = device_info['max_output_channels']
device_sr = int(device_info['default_samplerate'])
channel_idx = min(self.live_caller_channel, num_channels) - 1
# Resample to device sample rate if needed
if sample_rate != device_sr: if sample_rate != device_sr:
audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=device_sr) ratio = device_sr / sample_rate
out_len = int(len(audio) * ratio)
indices = (np.arange(out_len) / ratio).astype(int)
indices = np.clip(indices, 0, len(audio) - 1)
audio = audio[indices]
# Create multi-channel output if self._live_caller_write:
multi_ch = np.zeros((len(audio), num_channels), dtype=np.float32) self._live_caller_write(audio)
multi_ch[:, channel_idx] = audio
# Write to output device
with sd.OutputStream(
device=self.output_device,
samplerate=device_sr,
channels=num_channels,
dtype=np.float32,
) as stream:
stream.write(multi_ch)
except Exception as e: except Exception as e:
print(f"Real caller audio routing error: {e}") print(f"Real caller audio routing error: {e}")
@@ -366,44 +456,64 @@ class AudioService:
self._host_send_callback = send_callback self._host_send_callback = send_callback
device_info = sd.query_devices(self.input_device) def _start():
max_channels = device_info['max_input_channels'] device_info = sd.query_devices(self.input_device)
device_sr = int(device_info['default_samplerate']) max_channels = device_info['max_input_channels']
record_channel = min(self.input_channel, max_channels) - 1 device_sr = int(device_info['default_samplerate'])
record_channel = min(self.input_channel, max_channels) - 1
step = max(1, int(device_sr / 16000))
import librosa # Buffer host mic to send ~60ms chunks instead of tiny 21ms ones
host_accum = []
host_accum_samples = [0]
send_threshold = 960 # 60ms at 16kHz
def callback(indata, frames, time_info, status): def callback(indata, frames, time_info, status):
if not self._host_send_callback: # Capture for push-to-talk recording if active
return if self._recording and self._recorded_audio is not None:
# Extract the configured input channel self._recorded_audio.append(indata[:, record_channel].copy())
mono = indata[:, record_channel].copy()
# Resample to 16kHz if needed
if device_sr != 16000:
mono = librosa.resample(mono, orig_sr=device_sr, target_sr=16000)
# Convert float32 to int16 PCM
pcm = (mono * 32767).astype(np.int16).tobytes()
self._host_send_callback(pcm)
self._host_stream = sd.InputStream( if not self._host_send_callback:
device=self.input_device, return
channels=max_channels, mono = indata[:, record_channel]
samplerate=device_sr, # Downsample to ~16kHz with averaging (anti-aliased)
dtype=np.float32, if step > 1:
blocksize=4096, n = len(mono) // step * step
callback=callback, mono = mono[:n].reshape(-1, step).mean(axis=1)
)
self._host_stream.start() host_accum.append(mono.copy())
print(f"[Audio] Host mic streaming started (device {self.input_device} ch {self.input_channel} @ {device_sr}Hz)") host_accum_samples[0] += len(mono)
if host_accum_samples[0] >= send_threshold:
combined = np.concatenate(host_accum)
pcm = (combined * 32767).astype(np.int16).tobytes()
host_accum.clear()
host_accum_samples[0] = 0
self._host_send_callback(pcm)
self._host_device_sr = device_sr
self._host_stream = sd.InputStream(
device=self.input_device,
channels=max_channels,
samplerate=device_sr,
dtype=np.float32,
blocksize=1024,
callback=callback,
)
self._host_stream.start()
print(f"[Audio] Host mic streaming started (device {self.input_device} ch {self.input_channel} @ {device_sr}Hz)")
threading.Thread(target=_start, daemon=True).start()
def stop_host_stream(self): def stop_host_stream(self):
"""Stop host mic streaming""" """Stop host mic streaming and live caller output"""
if self._host_stream: if self._host_stream:
self._host_stream.stop() self._host_stream.stop()
self._host_stream.close() self._host_stream.close()
self._host_stream = None self._host_stream = None
self._host_send_callback = None self._host_send_callback = None
print("[Audio] Host mic streaming stopped") print("[Audio] Host mic streaming stopped")
self._stop_live_caller_stream()
# --- Music Playback --- # --- Music Playback ---

View File

@@ -1,13 +1,14 @@
"""Browser caller queue and audio stream service""" """Phone caller queue and audio stream service"""
import asyncio import asyncio
import time import time
import threading import threading
import numpy as np
from typing import Optional from typing import Optional
class CallerService: class CallerService:
"""Manages browser caller queue, channel allocation, and WebSocket streams""" """Manages phone caller queue, channel allocation, and WebSocket streams"""
FIRST_REAL_CHANNEL = 3 FIRST_REAL_CHANNEL = 3
@@ -18,15 +19,17 @@ class CallerService:
self._caller_counter: int = 0 self._caller_counter: int = 0
self._lock = threading.Lock() self._lock = threading.Lock()
self._websockets: dict[str, any] = {} # caller_id -> WebSocket self._websockets: dict[str, any] = {} # caller_id -> WebSocket
self._call_sids: dict[str, str] = {} # caller_id -> SignalWire callSid
self.streaming_tts: bool = False # True while TTS audio is being streamed
def add_to_queue(self, caller_id: str, name: str): def add_to_queue(self, caller_id: str, phone: str):
with self._lock: with self._lock:
self._queue.append({ self._queue.append({
"caller_id": caller_id, "caller_id": caller_id,
"name": name, "phone": phone,
"queued_at": time.time(), "queued_at": time.time(),
}) })
print(f"[Caller] {name} added to queue (ID: {caller_id})") print(f"[Caller] {phone} added to queue (ID: {caller_id})")
def remove_from_queue(self, caller_id: str): def remove_from_queue(self, caller_id: str):
with self._lock: with self._lock:
@@ -39,7 +42,7 @@ class CallerService:
return [ return [
{ {
"caller_id": c["caller_id"], "caller_id": c["caller_id"],
"name": c["name"], "phone": c["phone"],
"wait_time": int(now - c["queued_at"]), "wait_time": int(now - c["queued_at"]),
} }
for c in self._queue for c in self._queue
@@ -72,24 +75,25 @@ class CallerService:
channel = self.allocate_channel() channel = self.allocate_channel()
self._caller_counter += 1 self._caller_counter += 1
name = caller["name"] phone = caller["phone"]
call_info = { call_info = {
"caller_id": caller_id, "caller_id": caller_id,
"name": name, "phone": phone,
"channel": channel, "channel": channel,
"started_at": time.time(), "started_at": time.time(),
} }
self.active_calls[caller_id] = call_info self.active_calls[caller_id] = call_info
print(f"[Caller] {name} taken on air — channel {channel}") print(f"[Caller] {phone} taken on air — channel {channel}")
return call_info return call_info
def hangup(self, caller_id: str): def hangup(self, caller_id: str):
call_info = self.active_calls.pop(caller_id, None) call_info = self.active_calls.pop(caller_id, None)
if call_info: if call_info:
self.release_channel(call_info["channel"]) self.release_channel(call_info["channel"])
print(f"[Caller] {call_info['name']} hung up — channel {call_info['channel']} released") print(f"[Caller] {call_info['phone']} hung up — channel {call_info['channel']} released")
self._websockets.pop(caller_id, None) self._websockets.pop(caller_id, None)
self._call_sids.pop(caller_id, None)
def reset(self): def reset(self):
with self._lock: with self._lock:
@@ -100,6 +104,7 @@ class CallerService:
self._allocated_channels.clear() self._allocated_channels.clear()
self._caller_counter = 0 self._caller_counter = 0
self._websockets.clear() self._websockets.clear()
self._call_sids.clear()
print("[Caller] Service reset") print("[Caller] Service reset")
def register_websocket(self, caller_id: str, websocket): def register_websocket(self, caller_id: str, websocket):
@@ -111,37 +116,77 @@ class CallerService:
self._websockets.pop(caller_id, None) self._websockets.pop(caller_id, None)
async def send_audio_to_caller(self, caller_id: str, pcm_data: bytes, sample_rate: int): async def send_audio_to_caller(self, caller_id: str, pcm_data: bytes, sample_rate: int):
"""Send audio to real caller via WebSocket binary frame""" """Send small audio chunk to caller via SignalWire WebSocket.
Encodes L16 PCM as base64 JSON per SignalWire protocol.
"""
ws = self._websockets.get(caller_id) ws = self._websockets.get(caller_id)
if not ws: if not ws:
return return
try: try:
import base64
if sample_rate != 16000: if sample_rate != 16000:
import numpy as np
import librosa
audio = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0 audio = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0
audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=16000) ratio = 16000 / sample_rate
out_len = int(len(audio) * ratio)
indices = (np.arange(out_len) / ratio).astype(int)
indices = np.clip(indices, 0, len(audio) - 1)
audio = audio[indices]
pcm_data = (audio * 32767).astype(np.int16).tobytes() pcm_data = (audio * 32767).astype(np.int16).tobytes()
await ws.send_bytes(pcm_data)
payload = base64.b64encode(pcm_data).decode('ascii')
import json
await ws.send_text(json.dumps({
"event": "media",
"media": {"payload": payload}
}))
except Exception as e: except Exception as e:
print(f"[Caller] Failed to send audio: {e}") print(f"[Caller] Failed to send audio: {e}")
async def notify_caller(self, caller_id: str, message: dict): async def stream_audio_to_caller(self, caller_id: str, pcm_data: bytes, sample_rate: int):
"""Send JSON control message to caller""" """Stream large audio (TTS) to caller in real-time chunks via SignalWire WebSocket."""
ws = self._websockets.get(caller_id) ws = self._websockets.get(caller_id)
if ws: if not ws:
import json return
await ws.send_text(json.dumps(message))
async def disconnect_caller(self, caller_id: str): self.streaming_tts = True
"""Disconnect a caller's WebSocket""" try:
ws = self._websockets.get(caller_id) import base64
if ws: import json
try: audio = np.frombuffer(pcm_data, dtype=np.int16).astype(np.float32) / 32768.0
import json if sample_rate != 16000:
await ws.send_text(json.dumps({"status": "disconnected"})) ratio = 16000 / sample_rate
await ws.close() out_len = int(len(audio) * ratio)
except Exception: indices = (np.arange(out_len) / ratio).astype(int)
pass indices = np.clip(indices, 0, len(audio) - 1)
self._websockets.pop(caller_id, None) audio = audio[indices]
chunk_samples = 960
for i in range(0, len(audio), chunk_samples):
if caller_id not in self._websockets:
break
chunk = audio[i:i + chunk_samples]
pcm_chunk = (chunk * 32767).astype(np.int16).tobytes()
payload = base64.b64encode(pcm_chunk).decode('ascii')
await ws.send_text(json.dumps({
"event": "media",
"media": {"payload": payload}
}))
await asyncio.sleep(0.055)
except Exception as e:
print(f"[Caller] Failed to stream audio: {e}")
finally:
self.streaming_tts = False
def register_call_sid(self, caller_id: str, call_sid: str):
"""Track SignalWire callSid for a caller"""
self._call_sids[caller_id] = call_sid
def get_call_sid(self, caller_id: str) -> str | None:
"""Get SignalWire callSid for a caller"""
return self._call_sids.get(caller_id)
def unregister_call_sid(self, caller_id: str):
"""Remove callSid tracking"""
self._call_sids.pop(caller_id, None)

178
backend/services/news.py Normal file
View File

@@ -0,0 +1,178 @@
"""News service for current events awareness in AI callers"""
import asyncio
import time
import re
from dataclasses import dataclass
from urllib.parse import quote_plus
from xml.etree import ElementTree
import httpx
@dataclass
class NewsItem:
    """A single news headline parsed from an RSS feed item."""
    # Headline text (always non-empty — items without a title are dropped
    # by NewsService._parse_rss).
    title: str
    # Publisher name, or "" when the feed item has no <source> element.
    source: str
    # Raw RSS <pubDate> string; not parsed into a datetime.
    published: str
class NewsService:
    """Fetch and cache Google News RSS headlines and topic searches.

    Used to give AI callers awareness of current events. All network
    access goes through a single lazily-created httpx.AsyncClient.
    Results are cached in memory: headlines for 30 minutes, per-query
    searches for 10 minutes (at most ~50 cached queries).
    """

    def __init__(self):
        # Shared HTTP client; created on first use via the `client` property.
        self._client: httpx.AsyncClient | None = None
        # Top-headlines cache plus the timestamp it was last refreshed.
        self._headlines_cache: list[NewsItem] = []
        self._headlines_ts: float = 0
        self._headlines_lock = asyncio.Lock()
        # Search cache: lowercased query -> (fetch_time, items).
        self._search_cache: dict[str, tuple[float, list[NewsItem]]] = {}
        self._search_lock = asyncio.Lock()

    @property
    def client(self) -> httpx.AsyncClient:
        """Return the shared AsyncClient, recreating it if it was closed."""
        if self._client is None or self._client.is_closed:
            self._client = httpx.AsyncClient(timeout=10.0)
        return self._client

    async def get_headlines(self) -> list[NewsItem]:
        """Return top headlines, cached for 30 minutes.

        The lock is held across the fetch so concurrent callers do not
        hit the feed simultaneously. On fetch failure the stale cache is
        returned if available, else an empty list (never raises).
        """
        async with self._headlines_lock:
            if self._headlines_cache and time.time() - self._headlines_ts < 1800:
                return self._headlines_cache
            try:
                resp = await self.client.get("https://news.google.com/rss")
                resp.raise_for_status()
                items = self._parse_rss(resp.text, max_items=10)
                self._headlines_cache = items
                self._headlines_ts = time.time()
                return items
            except Exception as e:
                print(f"[News] Headlines fetch failed: {e}")
                if self._headlines_cache:
                    return self._headlines_cache
                return []

    async def search_topic(self, query: str) -> list[NewsItem]:
        """Search Google News for *query*; results cached 10 min per query.

        The lock is released during the network fetch, so two concurrent
        searches for the same new query may both hit the network — the
        second write simply overwrites the first. On failure, a stale
        cached entry is returned if one exists, else an empty list.
        """
        cache_key = query.lower()
        async with self._search_lock:
            if cache_key in self._search_cache:
                ts, items = self._search_cache[cache_key]
                if time.time() - ts < 600:
                    return items
            # Evict oldest when cache too large
            if len(self._search_cache) > 50:
                oldest_key = min(self._search_cache, key=lambda k: self._search_cache[k][0])
                del self._search_cache[oldest_key]
        try:
            encoded = quote_plus(query)
            url = f"https://news.google.com/rss/search?q={encoded}&hl=en-US&gl=US&ceid=US:en"
            resp = await self.client.get(url)
            resp.raise_for_status()
            items = self._parse_rss(resp.text, max_items=5)
            async with self._search_lock:
                self._search_cache[cache_key] = (time.time(), items)
            return items
        except Exception as e:
            print(f"[News] Search failed for '{query}': {e}")
            # Fall back to a stale cached result if we have one.
            async with self._search_lock:
                if cache_key in self._search_cache:
                    return self._search_cache[cache_key][1]
            return []

    def _parse_rss(self, xml_text: str, max_items: int = 10) -> list[NewsItem]:
        """Parse up to *max_items* <item> elements from RSS XML.

        Items without a title are skipped. Returns whatever was parsed
        before any ParseError (possibly an empty list); never raises.
        """
        items = []
        try:
            root = ElementTree.fromstring(xml_text)
            for item_el in root.iter("item"):
                if len(items) >= max_items:
                    break
                title = item_el.findtext("title", "").strip()
                source_el = item_el.find("source")
                source = source_el.text.strip() if source_el is not None and source_el.text else ""
                published = item_el.findtext("pubDate", "").strip()
                if title:
                    items.append(NewsItem(title=title, source=source, published=published))
        except ElementTree.ParseError as e:
            print(f"[News] RSS parse error: {e}")
        return items

    def format_headlines_for_prompt(self, items: list[NewsItem]) -> str:
        """Render items as a "- title (source)" bulleted list for an LLM prompt."""
        lines = []
        for item in items:
            if item.source:
                lines.append(f"- {item.title} ({item.source})")
            else:
                lines.append(f"- {item.title}")
        return "\n".join(lines)

    async def close(self):
        """Close the shared HTTP client if it is open."""
        if self._client and not self._client.is_closed:
            await self._client.aclose()
# Words ignored by extract_keywords(): standard English stop words plus
# casual speech fillers common in live-call transcripts.
# Fix: "been" and "being" were listed twice in the original literal
# (harmless in a set, but redundant) — deduplicated here.
STOP_WORDS = {
    "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
    "have", "has", "had", "do", "does", "did", "will", "would", "could",
    "should", "may", "might", "shall", "can", "need", "dare", "ought",
    "used", "to", "of", "in", "for", "on", "with", "at", "by", "from",
    "as", "into", "through", "during", "before", "after", "above", "below",
    "between", "out", "off", "over", "under", "again", "further", "then",
    "once", "here", "there", "when", "where", "why", "how", "all", "both",
    "each", "few", "more", "most", "other", "some", "such", "no", "nor",
    "not", "only", "own", "same", "so", "than", "too", "very", "just",
    "but", "and", "or", "if", "while", "because", "until", "about",
    "that", "this", "these", "those", "what", "which", "who", "whom",
    "it", "its", "he", "him", "his", "she", "her", "they", "them",
    "their", "we", "us", "our", "you", "your", "me", "my", "i",
    # Casual speech fillers
    "yeah", "well", "like", "man", "dude", "okay", "right", "know",
    "think", "mean", "really", "actually", "honestly", "basically",
    "literally", "stuff", "thing", "things", "something", "anything",
    "nothing", "everything", "someone", "anyone", "everyone", "nobody",
    "gonna", "wanna", "gotta", "kinda", "sorta", "dunno",
    "look", "see", "say", "said", "tell", "told", "talk", "talking",
    "feel", "felt", "guess", "sure", "maybe", "probably", "never",
    "always", "still", "even", "much", "many", "also", "got", "get",
    "getting", "going", "come", "came", "make", "made", "take", "took",
    "give", "gave", "want", "keep", "kept", "let", "put", "went",
    "doing", "having", "call", "called", "calling",
    "tonight", "today", "night", "time", "long", "good", "bad",
    "first", "last", "back", "down", "ever", "away", "cant", "dont",
    "didnt", "doesnt", "isnt", "wasnt", "wont", "wouldnt", "couldnt",
    "shouldnt", "aint", "stop", "start", "started", "help",
}


def extract_keywords(text: str, max_keywords: int = 3) -> list[str]:
    """Pick up to *max_keywords* salient words from *text* for a news search.

    Two passes:
      1. Capitalized words not at a sentence start (likely proper nouns),
         kept with their original capitalization.
      2. Fallback: lowercase words longer than 4 characters.
    STOP_WORDS are skipped in both passes. Returns fewer than
    *max_keywords* entries (possibly []) when the text is short or bland.
    """
    words = text.split()
    keywords = []
    # Pass 1: capitalized words (proper nouns) not at sentence start
    for i, word in enumerate(words):
        clean = re.sub(r'[^\w]', '', word)
        if not clean:
            continue
        # A word is "sentence-start" if it is first, or the previous word
        # ends in terminal punctuation.
        is_sentence_start = i == 0 or (i > 0 and words[i - 1].rstrip()[-1:] in '.!?')
        if clean[0].isupper() and not is_sentence_start and clean.lower() not in STOP_WORDS:
            if clean not in keywords:
                keywords.append(clean)
            if len(keywords) >= max_keywords:
                return keywords
    # Pass 2: uncommon words (>4 chars, not in stop words)
    for word in words:
        clean = re.sub(r'[^\w]', '', word).lower()
        if len(clean) > 4 and clean not in STOP_WORDS:
            title_clean = clean.capitalize()
            # Skip if already found in either capitalization.
            if title_clean not in keywords and clean not in [k.lower() for k in keywords]:
                keywords.append(clean)
                if len(keywords) >= max_keywords:
                    return keywords
    return keywords
news_service = NewsService()

View File

@@ -1,155 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Call In - Luke at the Roost</title>
<style>
/* Theme variables shared by all rules below. */
:root {
--bg: #1a1a2e;
--bg-light: #252547;
--accent: #e94560;
--text: #fff;
--text-muted: #888;
--radius: 8px;
}
* { box-sizing: border-box; margin: 0; padding: 0; }
/* Center the single .container card in the viewport. */
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
background: var(--bg);
color: var(--text);
min-height: 100vh;
display: flex;
align-items: center;
justify-content: center;
}
.container {
max-width: 400px;
width: 100%;
padding: 30px;
text-align: center;
}
h1 {
font-size: 1.5em;
margin-bottom: 8px;
}
.subtitle {
color: var(--text-muted);
margin-bottom: 30px;
font-size: 0.9em;
}
.form-group {
margin-bottom: 20px;
}
.form-group input {
width: 100%;
padding: 12px 16px;
border: 1px solid #444;
border-radius: var(--radius);
background: var(--bg-light);
color: var(--text);
font-size: 1em;
outline: none;
}
.form-group input:focus {
border-color: var(--accent);
}
.call-btn {
width: 100%;
padding: 14px;
border: none;
border-radius: var(--radius);
background: var(--accent);
color: #fff;
font-size: 1.1em;
font-weight: 600;
cursor: pointer;
transition: opacity 0.2s;
}
.call-btn:hover { opacity: 0.9; }
.call-btn:disabled { opacity: 0.5; cursor: not-allowed; }
/* Hidden until a call starts; call-in.js toggles display. */
.hangup-btn {
width: 100%;
padding: 14px;
border: none;
border-radius: var(--radius);
background: #c0392b;
color: #fff;
font-size: 1.1em;
font-weight: 600;
cursor: pointer;
margin-top: 12px;
display: none;
}
.hangup-btn:hover { opacity: 0.9; }
/* Status card; .visible is added by setStatus() in call-in.js. */
.status {
margin-top: 20px;
padding: 16px;
background: var(--bg-light);
border-radius: var(--radius);
display: none;
}
.status.visible { display: block; }
.status-label {
font-size: 0.85em;
color: var(--text-muted);
text-transform: uppercase;
letter-spacing: 1px;
margin-bottom: 6px;
}
.status-text {
font-size: 1.1em;
font-weight: 500;
}
/* Pulsing accent styling while the caller is on air. */
.on-air .status-text {
color: var(--accent);
font-weight: 700;
animation: pulse 1.5s ease-in-out infinite;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.6; }
}
/* Mic level bar; width of .mic-meter-fill is driven by startMicMeter(). */
.mic-meter {
margin-top: 16px;
height: 6px;
background: #333;
border-radius: 3px;
overflow: hidden;
display: none;
}
.mic-meter.visible { display: block; }
.mic-meter-fill {
height: 100%;
background: #2ecc71;
width: 0%;
transition: width 0.1s;
border-radius: 3px;
}
</style>
</head>
<body>
<div class="container">
<h1>Luke at the Roost</h1>
<p class="subtitle">Call in to the show</p>
<div class="form-group">
<input type="text" id="caller-name" placeholder="Your name" maxlength="30" autocomplete="off">
</div>
<button id="call-btn" class="call-btn">Call In</button>
<button id="hangup-btn" class="hangup-btn">Hang Up</button>
<div id="status" class="status">
<div class="status-label">Status</div>
<div id="status-text" class="status-text">Connecting...</div>
</div>
<div id="mic-meter" class="mic-meter">
<div id="mic-meter-fill" class="mic-meter-fill"></div>
</div>
</div>
<!-- All interactive behavior lives in call-in.js. -->
<script src="/js/call-in.js"></script>
</body>
</html>

View File

@@ -53,7 +53,7 @@
<!-- Call Queue --> <!-- Call Queue -->
<section class="queue-section"> <section class="queue-section">
<h2>Incoming Calls <a href="/call-in" target="_blank" style="font-size:0.6em;font-weight:normal;color:var(--accent);">Call-in page</a></h2> <h2>Incoming Calls</h2>
<div id="call-queue" class="call-queue"> <div id="call-queue" class="call-queue">
<div class="queue-empty">No callers waiting</div> <div class="queue-empty">No callers waiting</div>
</div> </div>
@@ -128,7 +128,7 @@
</div> </div>
<div class="channel-row"> <div class="channel-row">
<label>Caller Ch <input type="number" id="caller-channel" value="1" min="1" max="16" class="channel-input"></label> <label>Caller Ch <input type="number" id="caller-channel" value="1" min="1" max="16" class="channel-input"></label>
<label>Live Ch <input type="number" id="live-caller-channel" value="4" min="1" max="16" class="channel-input"></label> <label>Live Ch <input type="number" id="live-caller-channel" value="9" min="1" max="16" class="channel-input"></label>
<label>Music Ch <input type="number" id="music-channel" value="2" min="1" max="16" class="channel-input"></label> <label>Music Ch <input type="number" id="music-channel" value="2" min="1" max="16" class="channel-input"></label>
<label>SFX Ch <input type="number" id="sfx-channel" value="3" min="1" max="16" class="channel-input"></label> <label>SFX Ch <input type="number" id="sfx-channel" value="3" min="1" max="16" class="channel-input"></label>
</div> </div>
@@ -207,6 +207,6 @@
</div> </div>
</div> </div>
<script src="/js/app.js?v=11"></script> <script src="/js/app.js?v=13"></script>
</body> </body>
</html> </html>

View File

@@ -229,7 +229,7 @@ async function loadAudioDevices() {
if (inputCh) inputCh.value = settings.input_channel || 1; if (inputCh) inputCh.value = settings.input_channel || 1;
if (callerCh) callerCh.value = settings.caller_channel || 1; if (callerCh) callerCh.value = settings.caller_channel || 1;
if (liveCallerCh) liveCallerCh.value = settings.live_caller_channel || 4; if (liveCallerCh) liveCallerCh.value = settings.live_caller_channel || 9;
if (musicCh) musicCh.value = settings.music_channel || 2; if (musicCh) musicCh.value = settings.music_channel || 2;
if (sfxCh) sfxCh.value = settings.sfx_channel || 3; if (sfxCh) sfxCh.value = settings.sfx_channel || 3;
@@ -265,7 +265,7 @@ async function saveAudioDevices() {
input_channel: inputChannel ? parseInt(inputChannel) : 1, input_channel: inputChannel ? parseInt(inputChannel) : 1,
output_device: outputDevice ? parseInt(outputDevice) : null, output_device: outputDevice ? parseInt(outputDevice) : null,
caller_channel: callerChannel ? parseInt(callerChannel) : 1, caller_channel: callerChannel ? parseInt(callerChannel) : 1,
live_caller_channel: liveCallerChannel ? parseInt(liveCallerChannel) : 4, live_caller_channel: liveCallerChannel ? parseInt(liveCallerChannel) : 9,
music_channel: musicChannel ? parseInt(musicChannel) : 2, music_channel: musicChannel ? parseInt(musicChannel) : 2,
sfx_channel: sfxChannel ? parseInt(sfxChannel) : 3, sfx_channel: sfxChannel ? parseInt(sfxChannel) : 3,
phone_filter: phoneFilterChecked phone_filter: phoneFilterChecked
@@ -878,7 +878,7 @@ function renderQueue(queue) {
const waitStr = mins > 0 ? `${mins}m ${secs}s` : `${secs}s`; const waitStr = mins > 0 ? `${mins}m ${secs}s` : `${secs}s`;
return ` return `
<div class="queue-item"> <div class="queue-item">
<span class="queue-name">${caller.name}</span> <span class="queue-name">${caller.phone}</span>
<span class="queue-wait">waiting ${waitStr}</span> <span class="queue-wait">waiting ${waitStr}</span>
<button class="queue-take-btn" onclick="takeCall('${caller.caller_id}')">Take Call</button> <button class="queue-take-btn" onclick="takeCall('${caller.caller_id}')">Take Call</button>
<button class="queue-drop-btn" onclick="dropCall('${caller.caller_id}')">Drop</button> <button class="queue-drop-btn" onclick="dropCall('${caller.caller_id}')">Drop</button>
@@ -893,7 +893,7 @@ async function takeCall(callerId) {
const data = await res.json(); const data = await res.json();
if (data.status === 'on_air') { if (data.status === 'on_air') {
showRealCaller(data.caller); showRealCaller(data.caller);
log(`${data.caller.name} is on air — Channel ${data.caller.channel}`); log(`${data.caller.phone} is on air — Channel ${data.caller.channel}`);
} }
} catch (err) { } catch (err) {
log('Failed to take call: ' + err.message); log('Failed to take call: ' + err.message);
@@ -936,7 +936,7 @@ function updateActiveCallIndicator() {
function showRealCaller(callerInfo) { function showRealCaller(callerInfo) {
const nameEl = document.getElementById('real-caller-name'); const nameEl = document.getElementById('real-caller-name');
const chEl = document.getElementById('real-caller-channel'); const chEl = document.getElementById('real-caller-channel');
if (nameEl) nameEl.textContent = callerInfo.name; if (nameEl) nameEl.textContent = callerInfo.phone;
if (chEl) chEl.textContent = `Ch ${callerInfo.channel}`; if (chEl) chEl.textContent = `Ch ${callerInfo.channel}`;
document.getElementById('real-caller-info')?.classList.remove('hidden'); document.getElementById('real-caller-info')?.classList.remove('hidden');

View File

@@ -1,232 +0,0 @@
/**
 * Call-In Page — Browser WebSocket audio streaming
 * Captures mic via AudioWorklet, sends Int16 PCM 16kHz mono over WebSocket.
 * Receives Int16 PCM 16kHz mono back for playback.
 */
// Module-level call state; all of it is reset by cleanup().
let ws = null;
let audioCtx = null;
let micStream = null;
let workletNode = null;
let nextPlayTime = 0; // AudioContext timeline cursor for gapless playback scheduling
let callerId = null;
// Cached DOM references (IDs defined in the call-in page markup).
const callBtn = document.getElementById('call-btn');
const hangupBtn = document.getElementById('hangup-btn');
const statusEl = document.getElementById('status');
const statusText = document.getElementById('status-text');
const nameInput = document.getElementById('caller-name');
const micMeter = document.getElementById('mic-meter');
const micMeterFill = document.getElementById('mic-meter-fill');
callBtn.addEventListener('click', startCall);
hangupBtn.addEventListener('click', hangUp);
// Pressing Enter in the name field starts the call, same as the button.
nameInput.addEventListener('keydown', e => {
if (e.key === 'Enter') startCall();
});
/**
 * Start a call: capture the mic, set up an AudioWorklet that downsamples
 * to 16kHz Int16 PCM, and stream it to the server over a WebSocket.
 * On any failure the UI is restored and resources are released via cleanup().
 */
async function startCall() {
const name = nameInput.value.trim() || 'Anonymous';
callBtn.disabled = true;
setStatus('Connecting...', false);
try {
// Get mic access
micStream = await navigator.mediaDevices.getUserMedia({
audio: { echoCancellation: true, noiseSuppression: true, sampleRate: 16000 }
});
// Set up AudioContext
// NOTE(review): context runs at 48kHz; the worklet below downsamples to 16kHz.
audioCtx = new AudioContext({ sampleRate: 48000 });
// Register worklet processor inline via blob
// (processorCode is a runtime string executed in the AudioWorklet scope;
// `sampleRate` inside it is the worklet global, i.e. 48000 here.
// The downsampling is simple decimation — no anti-aliasing filter.)
const processorCode = `
class CallerProcessor extends AudioWorkletProcessor {
constructor() {
super();
this.buffer = [];
this.targetSamples = 4096; // ~256ms at 16kHz
}
process(inputs) {
const input = inputs[0][0];
if (!input) return true;
// Downsample from sampleRate to 16000
const ratio = sampleRate / 16000;
for (let i = 0; i < input.length; i += ratio) {
const idx = Math.floor(i);
if (idx < input.length) {
this.buffer.push(input[idx]);
}
}
if (this.buffer.length >= this.targetSamples) {
const chunk = this.buffer.splice(0, this.targetSamples);
const int16 = new Int16Array(chunk.length);
for (let i = 0; i < chunk.length; i++) {
const s = Math.max(-1, Math.min(1, chunk[i]));
int16[i] = s < 0 ? s * 32768 : s * 32767;
}
this.port.postMessage(int16.buffer, [int16.buffer]);
}
return true;
}
}
registerProcessor('caller-processor', CallerProcessor);
`;
const blob = new Blob([processorCode], { type: 'application/javascript' });
const blobUrl = URL.createObjectURL(blob);
await audioCtx.audioWorklet.addModule(blobUrl);
URL.revokeObjectURL(blobUrl);
// Connect mic to worklet
const source = audioCtx.createMediaStreamSource(micStream);
workletNode = new AudioWorkletNode(audioCtx, 'caller-processor');
// Connect WebSocket
const proto = location.protocol === 'https:' ? 'wss:' : 'ws:';
ws = new WebSocket(`${proto}//${location.host}/api/caller/stream`);
ws.binaryType = 'arraybuffer';
ws.onopen = () => {
ws.send(JSON.stringify({ type: 'join', name }));
};
// Text frames are JSON control messages; binary frames are PCM audio.
ws.onmessage = (event) => {
if (typeof event.data === 'string') {
handleControlMessage(JSON.parse(event.data));
} else {
handleAudioData(event.data);
}
};
ws.onclose = () => {
setStatus('Disconnected', false);
cleanup();
};
ws.onerror = () => {
setStatus('Connection error', false);
cleanup();
};
// Forward mic audio to WebSocket
workletNode.port.onmessage = (e) => {
if (ws && ws.readyState === WebSocket.OPEN) {
ws.send(e.data);
}
};
source.connect(workletNode);
// Don't connect worklet to destination — we don't want to hear our own mic
// Show mic meter
const analyser = audioCtx.createAnalyser();
analyser.fftSize = 256;
source.connect(analyser);
startMicMeter(analyser);
// UI
nameInput.disabled = true;
hangupBtn.style.display = 'block';
} catch (err) {
console.error('Call error:', err);
setStatus('Failed: ' + err.message, false);
callBtn.disabled = false;
cleanup();
}
}
/**
 * Dispatch a JSON control message from the server.
 * Recognized statuses: 'queued', 'on_air', 'disconnected'.
 */
function handleControlMessage(msg) {
    switch (msg.status) {
        case 'queued':
            callerId = msg.caller_id;
            setStatus(`Waiting in queue (position ${msg.position})...`, false);
            break;
        case 'on_air':
            setStatus('ON AIR', true);
            // Restart playback scheduling from "now" when going live.
            nextPlayTime = audioCtx.currentTime;
            break;
        case 'disconnected':
            setStatus('Disconnected', false);
            cleanup();
            break;
    }
}
/**
 * Play an incoming binary frame of Int16 PCM (16kHz mono).
 * Chunks are scheduled back-to-back on the AudioContext timeline so
 * playback stays gapless even when frames arrive in bursts.
 */
function handleAudioData(buffer) {
    if (!audioCtx) return;
    const samples = new Int16Array(buffer);
    // Convert signed 16-bit samples to [-1, 1) floats.
    const floats = Float32Array.from(samples, s => s / 32768);
    const audioBuf = audioCtx.createBuffer(1, floats.length, 16000);
    audioBuf.copyToChannel(floats, 0);
    const src = audioCtx.createBufferSource();
    src.buffer = audioBuf;
    src.connect(audioCtx.destination);
    // Never schedule in the past; catch up to "now" if we fell behind.
    nextPlayTime = Math.max(nextPlayTime, audioCtx.currentTime);
    src.start(nextPlayTime);
    nextPlayTime += audioBuf.duration;
}
/** End the call from the caller side: close the socket and tear down. */
function hangUp() {
    if (ws?.readyState === WebSocket.OPEN) ws.close();
    setStatus('Disconnected', false);
    cleanup();
}
/**
 * Release all audio/network resources and restore the idle UI.
 * Safe to call repeatedly — every step tolerates already-null state.
 */
function cleanup() {
    workletNode?.disconnect();
    workletNode = null;
    if (micStream) {
        for (const track of micStream.getTracks()) track.stop();
        micStream = null;
    }
    if (audioCtx) {
        // close() may reject if already closed; that's fine here.
        audioCtx.close().catch(() => {});
        audioCtx = null;
    }
    ws = null;
    callerId = null;
    callBtn.disabled = false;
    nameInput.disabled = false;
    hangupBtn.style.display = 'none';
    micMeter.classList.remove('visible');
}
/** Show the status card with the given text; style it as on-air when asked. */
function setStatus(text, isOnAir) {
    statusEl.classList.add('visible');
    statusText.textContent = text;
    // toggle(force) adds the class when isOnAir is truthy, removes otherwise.
    statusEl.classList.toggle('on-air', isOnAir);
}
/**
 * Drive the mic level bar from an AnalyserNode via requestAnimationFrame.
 * The loop stops itself once the AudioContext is torn down by cleanup().
 */
function startMicMeter(analyser) {
    micMeter.classList.add('visible');
    const bins = new Uint8Array(analyser.frequencyBinCount);
    const tick = () => {
        if (!analyser || !audioCtx) return;
        analyser.getByteFrequencyData(bins);
        let total = 0;
        for (const v of bins) total += v;
        const avg = total / bins.length;
        // Map the average bin magnitude to a 0–100% bar width.
        micMeterFill.style.width = Math.min(100, (avg / 128) * 100) + '%';
        requestAnimationFrame(tick);
    };
    tick();
}

View File

@@ -11,17 +11,17 @@ def test_queue_starts_empty():
def test_add_caller_to_queue(): def test_add_caller_to_queue():
svc = CallerService() svc = CallerService()
svc.add_to_queue("abc123", "Dave") svc.add_to_queue("abc123", "+15551234567")
q = svc.get_queue() q = svc.get_queue()
assert len(q) == 1 assert len(q) == 1
assert q[0]["caller_id"] == "abc123" assert q[0]["caller_id"] == "abc123"
assert q[0]["name"] == "Dave" assert q[0]["phone"] == "+15551234567"
assert "wait_time" in q[0] assert "wait_time" in q[0]
def test_remove_caller_from_queue(): def test_remove_caller_from_queue():
svc = CallerService() svc = CallerService()
svc.add_to_queue("abc123", "Dave") svc.add_to_queue("abc123", "+15551234567")
svc.remove_from_queue("abc123") svc.remove_from_queue("abc123")
assert svc.get_queue() == [] assert svc.get_queue() == []
@@ -39,7 +39,7 @@ def test_allocate_channel():
def test_take_call(): def test_take_call():
svc = CallerService() svc = CallerService()
svc.add_to_queue("abc123", "Dave") svc.add_to_queue("abc123", "+15551234567")
result = svc.take_call("abc123") result = svc.take_call("abc123")
assert result["caller_id"] == "abc123" assert result["caller_id"] == "abc123"
assert result["channel"] >= 3 assert result["channel"] >= 3
@@ -49,7 +49,7 @@ def test_take_call():
def test_hangup_real_caller(): def test_hangup_real_caller():
svc = CallerService() svc = CallerService()
svc.add_to_queue("abc123", "Dave") svc.add_to_queue("abc123", "+15551234567")
svc.take_call("abc123") svc.take_call("abc123")
ch = svc.active_calls["abc123"]["channel"] ch = svc.active_calls["abc123"]["channel"]
svc.hangup("abc123") svc.hangup("abc123")
@@ -59,12 +59,12 @@ def test_hangup_real_caller():
def test_caller_counter_increments(): def test_caller_counter_increments():
svc = CallerService() svc = CallerService()
svc.add_to_queue("id1", "Dave") svc.add_to_queue("id1", "+15551234567")
svc.add_to_queue("id2", "Sarah") svc.add_to_queue("id2", "+15559876543")
r1 = svc.take_call("id1") r1 = svc.take_call("id1")
r2 = svc.take_call("id2") r2 = svc.take_call("id2")
assert r1["name"] == "Dave" assert r1["phone"] == "+15551234567"
assert r2["name"] == "Sarah" assert r2["phone"] == "+15559876543"
def test_register_and_unregister_websocket(): def test_register_and_unregister_websocket():
@@ -78,7 +78,7 @@ def test_register_and_unregister_websocket():
def test_hangup_clears_websocket(): def test_hangup_clears_websocket():
svc = CallerService() svc = CallerService()
svc.add_to_queue("abc123", "Dave") svc.add_to_queue("abc123", "+15551234567")
svc.take_call("abc123") svc.take_call("abc123")
svc.register_websocket("abc123", object()) svc.register_websocket("abc123", object())
svc.hangup("abc123") svc.hangup("abc123")
@@ -102,64 +102,17 @@ def test_send_audio_no_websocket():
) )
def test_notify_caller(): def test_send_audio_json():
"""notify_caller sends JSON text to WebSocket""" """send_audio_to_caller sends base64 JSON via SignalWire protocol"""
import asyncio import asyncio
class FakeWS:
def __init__(self):
self.sent = []
async def send_text(self, data):
self.sent.append(data)
svc = CallerService()
ws = FakeWS()
svc.register_websocket("abc123", ws)
asyncio.get_event_loop().run_until_complete(
svc.notify_caller("abc123", {"status": "on_air", "channel": 3})
)
assert len(ws.sent) == 1
import json import json
msg = json.loads(ws.sent[0]) import base64
assert msg["status"] == "on_air"
assert msg["channel"] == 3
def test_disconnect_caller():
"""disconnect_caller sends disconnected message and removes WS"""
import asyncio
class FakeWS: class FakeWS:
def __init__(self): def __init__(self):
self.sent = [] self.sent_text = []
self.closed = False
async def send_text(self, data): async def send_text(self, data):
self.sent.append(data) self.sent_text.append(data)
async def close(self):
self.closed = True
svc = CallerService()
ws = FakeWS()
svc.register_websocket("abc123", ws)
asyncio.get_event_loop().run_until_complete(
svc.disconnect_caller("abc123")
)
assert ws.closed
assert "abc123" not in svc._websockets
import json
msg = json.loads(ws.sent[0])
assert msg["status"] == "disconnected"
def test_send_audio_binary():
"""send_audio_to_caller sends raw PCM bytes (not mulaw/JSON)"""
import asyncio
class FakeWS:
def __init__(self):
self.sent_bytes = []
async def send_bytes(self, data):
self.sent_bytes.append(data)
svc = CallerService() svc = CallerService()
ws = FakeWS() ws = FakeWS()
@@ -168,13 +121,15 @@ def test_send_audio_binary():
asyncio.get_event_loop().run_until_complete( asyncio.get_event_loop().run_until_complete(
svc.send_audio_to_caller("abc123", pcm, 16000) svc.send_audio_to_caller("abc123", pcm, 16000)
) )
assert len(ws.sent_bytes) == 1 assert len(ws.sent_text) == 1
assert ws.sent_bytes[0] == pcm msg = json.loads(ws.sent_text[0])
assert msg["event"] == "media"
assert base64.b64decode(msg["media"]["payload"]) == pcm
def test_take_call_preserves_caller_name(): def test_take_call_preserves_caller_phone():
"""take_call uses the name from the queue, not a generic counter name""" """take_call uses the phone from the queue"""
svc = CallerService() svc = CallerService()
svc.add_to_queue("abc123", "Dave from Chicago") svc.add_to_queue("abc123", "+15551234567")
result = svc.take_call("abc123") result = svc.take_call("abc123")
assert result["name"] == "Dave from Chicago" assert result["phone"] == "+15551234567"