Compare commits

..

2 Commits

Author SHA1 Message Date
d85a8d4511 Add listener email system with IMAP polling, TTS playback, and show awareness
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-16 05:22:56 -07:00
f0271e61df Clip pipeline improvements, direct YouTube upload, hero redesign, how-it-works updates
- make_clips: migrate refine_clip_timestamps to mlx-whisper, add LLM caption
  polishing, fix speaker label reversal in grouped caption lines
- upload_clips: interactive episode/clip/platform menus, direct YouTube Shorts
  upload via Data API v3 (bypasses Postiz), direct Bluesky upload
- Website hero: centered layout with left-column cover art on desktop, compact
  text links instead of pill buttons, scaled up typography
- How-it-works: move anatomy section above diagram, update stats (320 names,
  189+ personality layers, 20 towns, 570+ topics, 1400+ scenarios), add
  drunk/high/unhinged callers, voicemails, MLX Whisper GPU, LLM-polished captions
- All footers: add System Status link, remove Ko-fi branding
- .gitignore: YouTube OAuth credential files

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-16 04:06:23 -07:00
15 changed files with 926 additions and 281 deletions

4
.gitignore vendored
View File

@@ -50,5 +50,9 @@ voices-v1.0.bin
# Reference voices for TTS
ref_audio/
# YouTube OAuth credentials
youtube_client_secrets.json
youtube_token.json
# Claude settings (local)
.claude/

View File

@@ -22,6 +22,11 @@ class Settings(BaseSettings):
signalwire_phone: str = os.getenv("SIGNALWIRE_PHONE", "")
signalwire_stream_url: str = os.getenv("SIGNALWIRE_STREAM_URL", "")
# Email (IMAP) — inbox polled by _poll_imap_emails for listener submissions
# (connection is IMAP over SSL on port 993; see the polling task)
submissions_imap_host: str = os.getenv("SUBMISSIONS_IMAP_HOST", "")
submissions_imap_user: str = os.getenv("SUBMISSIONS_IMAP_USER", "")
submissions_imap_pass: str = os.getenv("SUBMISSIONS_IMAP_PASS", "")
# LLM Settings
llm_provider: str = "openrouter" # "openrouter" or "ollama"
openrouter_model: str = "anthropic/claude-sonnet-4-5"

View File

@@ -2717,15 +2717,22 @@ class Session:
def get_show_history(self) -> str:
"""Get formatted show history for AI caller prompts.
Randomly picks one previous caller to have a strong reaction to."""
if not self.call_history:
if not self.call_history and not any(e.read_on_air for e in _listener_emails):
return ""
lines = ["EARLIER IN THE SHOW:"]
for record in self.call_history:
caller_type_label = "(real caller)" if record.caller_type == "real" else "(AI)"
lines.append(f"- {record.caller_name} {caller_type_label}: {record.summary}")
# Include emails that were read on the show
read_emails = [e for e in _listener_emails if e.read_on_air]
for em in read_emails:
sender_name = em.sender.split("<")[0].strip().strip('"') if "<" in em.sender else "a listener"
preview = em.body[:150] if len(em.body) > 150 else em.body
lines.append(f"- A listener email from {sender_name} was read on air: \"{em.subject}\"{preview}")
# 20% chance to have a strong reaction to a previous caller
if random.random() < 0.20:
if self.call_history and random.random() < 0.20:
target = random.choice(self.call_history)
reaction = random.choice(SHOW_HISTORY_REACTIONS)
lines.append(f"\nYOU HEARD {target.caller_name.upper()} EARLIER and you {reaction}. Mention it if it comes up.")
@@ -3092,7 +3099,9 @@ async def _sync_signalwire_voicemails():
async def startup():
"""Pre-generate caller backgrounds on server start"""
_load_voicemails()
_load_emails()
asyncio.create_task(_sync_signalwire_voicemails())
asyncio.create_task(_poll_imap_emails())
restored = _load_checkpoint()
if not restored:
asyncio.create_task(_pregenerate_backgrounds())
@@ -3418,6 +3427,196 @@ async def delete_voicemail(vm_id: str):
return {"status": "deleted"}
# --- Listener Emails ---
EMAILS_META = Path(__file__).parent.parent / "data" / "emails.json"
@dataclass
class ListenerEmail:
    """A listener-submitted email fetched from the IMAP inbox."""
    id: str  # short id (first 8 chars of a uuid4), used in the /api/email routes
    sender: str  # From header as stored, e.g. '"Name" <addr@host>'
    subject: str  # decoded Subject header
    body: str  # plain-text body, stripped of surrounding whitespace
    timestamp: float  # Unix seconds, taken from the email's Date header
    read_on_air: bool = False  # set once the email has been TTS-read on the show
_listener_emails: list[ListenerEmail] = []
def _load_emails():
    """Restore persisted listener emails from EMAILS_META into _listener_emails.

    Best-effort: a missing file is a no-op, and a corrupt file logs the error
    and leaves the in-memory list empty rather than crashing startup.
    """
    global _listener_emails
    if not EMAILS_META.exists():
        return
    try:
        with open(EMAILS_META) as f:
            payload = json.load(f)
        restored = []
        for entry in payload.get("emails", []):
            restored.append(ListenerEmail(
                id=entry["id"], sender=entry["sender"], subject=entry["subject"],
                body=entry["body"], timestamp=entry["timestamp"],
                read_on_air=entry.get("read_on_air", False),
            ))
        _listener_emails = restored
        print(f"[Email] Loaded {len(_listener_emails)} emails")
    except Exception as e:
        print(f"[Email] Failed to load: {e}")
        _listener_emails = []
def _save_emails():
    """Persist the in-memory listener email list to EMAILS_META as JSON.

    Best-effort: filesystem errors are logged, never raised, so a failed
    save cannot take down the show loop.
    """
    try:
        EMAILS_META.parent.mkdir(parents=True, exist_ok=True)
        serialized = [
            {
                "id": em.id, "sender": em.sender, "subject": em.subject,
                "body": em.body, "timestamp": em.timestamp,
                "read_on_air": em.read_on_air,
            }
            for em in _listener_emails
        ]
        with open(EMAILS_META, "w") as f:
            json.dump({"emails": serialized}, f, indent=2)
    except Exception as exc:
        print(f"[Email] Failed to save: {exc}")
async def _poll_imap_emails():
    """Background task: poll IMAP every 30s for new listener emails.

    Connects via IMAP4_SSL (port 993), fetches UNSEEN messages, stores each
    as a ListenerEmail, flags it \\Seen, and persists via _save_emails().
    Runs forever; per-cycle errors are logged and the loop keeps polling.

    Fixes over the previous version:
    - mail.logout() now runs in a finally block, so a failure mid-poll no
      longer leaks the IMAP connection every 30 seconds.
    - The From header is RFC 2047-decoded like the Subject (the old code had
      a "Decode sender" comment but never decoded it).
    """
    import imaplib
    import email as email_lib
    from email.header import decode_header
    from email.utils import parsedate_to_datetime

    def _decode_mime(raw: str) -> str:
        # Headers may arrive as RFC 2047 encoded words; decode each chunk.
        decoded = ""
        for part, charset in decode_header(raw):
            if isinstance(part, bytes):
                decoded += part.decode(charset or "utf-8", errors="replace")
            else:
                decoded += part
        return decoded

    host = settings.submissions_imap_host
    user = settings.submissions_imap_user
    passwd = settings.submissions_imap_pass
    if not host or not user or not passwd:
        print("[Email] IMAP not configured, skipping email polling")
        return

    while True:
        try:
            mail = imaplib.IMAP4_SSL(host, 993)
            try:
                mail.login(user, passwd)
                mail.select("INBOX")
                _, msg_nums = mail.search(None, "UNSEEN")
                if msg_nums[0]:
                    for num in msg_nums[0].split():
                        _, msg_data = mail.fetch(num, "(RFC822)")
                        raw = msg_data[0][1]
                        msg = email_lib.message_from_bytes(raw)
                        # Decode sender and subject (both may be MIME-encoded)
                        from_raw = _decode_mime(msg.get("From", "Unknown"))
                        subject = _decode_mime(msg.get("Subject", "(no subject)"))
                        # Extract plain text body (first text/plain part of multipart)
                        body = ""
                        if msg.is_multipart():
                            for part in msg.walk():
                                if part.get_content_type() == "text/plain":
                                    payload = part.get_payload(decode=True)
                                    if payload:
                                        charset = part.get_content_charset() or "utf-8"
                                        body = payload.decode(charset, errors="replace")
                                    break
                        else:
                            payload = msg.get_payload(decode=True)
                            if payload:
                                charset = msg.get_content_charset() or "utf-8"
                                body = payload.decode(charset, errors="replace")
                        body = body.strip()
                        if not body:
                            # Skip bodyless mail; fetch already marked it \Seen.
                            continue
                        # Parse timestamp from email Date header, fall back to "now"
                        try:
                            ts = parsedate_to_datetime(msg.get("Date", "")).timestamp()
                        except Exception:
                            ts = time.time()
                        em = ListenerEmail(
                            id=str(uuid.uuid4())[:8],
                            sender=from_raw,
                            subject=subject,
                            body=body,
                            timestamp=ts,
                        )
                        _listener_emails.append(em)
                        print(f"[Email] New email from {from_raw}: {subject[:50]}")
                        # Explicitly flag \Seen (fetch normally does this too).
                        mail.store(num, "+FLAGS", "\\Seen")
                    _save_emails()
            finally:
                # Always close the session, even when a poll step raised.
                mail.logout()
        except Exception as exc:
            print(f"[Email] IMAP poll error: {exc}")
        await asyncio.sleep(30)
@app.get("/api/emails")
async def list_emails():
return [
{
"id": e.id, "sender": e.sender, "subject": e.subject,
"body": e.body, "timestamp": e.timestamp,
"read_on_air": e.read_on_air,
}
for e in sorted(_listener_emails, key=lambda e: e.timestamp, reverse=True)
]
@app.post("/api/email/{email_id}/play-on-air")
async def play_email_on_air(email_id: str):
em = next((e for e in _listener_emails if e.id == email_id), None)
if not em:
raise HTTPException(status_code=404, detail="Email not found")
# Extract display name, fall back to just "a listener"
sender_name = em.sender.split("<")[0].strip().strip('"') if "<" in em.sender else "a listener"
intro = f"This email is from {sender_name}. Subject: {em.subject}."
full_text = f"{intro}\n\n{em.body}"
async def _generate_and_play():
try:
audio_bytes = await generate_speech(full_text, "Alex", phone_quality="none", apply_filter=False)
audio_service.play_caller_audio(audio_bytes, 24000)
except Exception as exc:
print(f"[Email] TTS playback error: {exc}")
asyncio.create_task(_generate_and_play())
em.read_on_air = True
_save_emails()
return {"status": "playing"}
@app.delete("/api/email/{email_id}")
async def delete_email(email_id: str):
em = next((e for e in _listener_emails if e.id == email_id), None)
if not em:
raise HTTPException(status_code=404, detail="Email not found")
_listener_emails.remove(em)
_save_emails()
return {"status": "deleted"}
async def _signalwire_end_call(call_sid: str):
"""End a phone call via SignalWire REST API"""
if not call_sid or not settings.signalwire_space:

View File

@@ -761,3 +761,13 @@ section h2 {
.vm-btn.save:hover { background: #2a5db0; }
.vm-btn.delete { background: var(--accent-red); color: white; }
.vm-btn.delete:hover { background: #e03030; }
/* Listener Emails */
.email-item { display: flex; flex-direction: column; gap: 0.25rem; padding: 0.5rem; border-bottom: 1px solid rgba(232, 121, 29, 0.08); }
.email-item:last-child { border-bottom: none; }
.email-item.vm-unlistened { background: rgba(232, 121, 29, 0.06); }
.email-header { display: flex; justify-content: space-between; align-items: center; }
.email-sender { color: var(--accent); font-size: 0.85rem; font-weight: 600; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
.email-subject { font-size: 0.85rem; font-weight: 500; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
.email-preview { font-size: 0.8rem; color: var(--text-muted); line-height: 1.3; }
.email-item .vm-actions { margin-top: 0.25rem; }

View File

@@ -73,6 +73,14 @@
</div>
</section>
<!-- Listener Emails -->
<section class="voicemail-section">
<h2>Emails <span id="email-badge" class="voicemail-badge hidden">0</span></h2>
<div id="email-list" class="voicemail-list" style="max-height:300px">
<div class="queue-empty">No emails</div>
</div>
</section>
<!-- Chat -->
<section class="chat-section">
<div id="chat" class="chat-log"></div>

View File

@@ -62,6 +62,8 @@ document.addEventListener('DOMContentLoaded', async () => {
initEventListeners();
loadVoicemails();
setInterval(loadVoicemails, 30000);
loadEmails();
setInterval(loadEmails, 30000);
log('Ready. Configure audio devices in Settings, then click a caller to start.');
console.log('AI Radio Show ready');
} catch (err) {
@@ -1360,3 +1362,85 @@ async function deleteVoicemail(id) {
log('Failed to delete voicemail: ' + err.message);
}
}
// --- Listener Emails ---
// Fetch the listener email list from the backend and render it.
// Failures are deliberately silent: the 30s polling interval retries anyway.
async function loadEmails() {
  try {
    const res = await fetch('/api/emails');
    renderEmails(await res.json());
  } catch (err) {}
}
// Render the listener email list into #email-list and refresh the unread badge.
// `emails` is the /api/emails payload (server returns it newest-first).
function renderEmails(emails) {
  const list = document.getElementById('email-list');
  const badge = document.getElementById('email-badge');
  if (!list) return;
  // Badge counts emails not yet read on air; hidden entirely at zero.
  const unread = emails.filter(e => !e.read_on_air).length;
  if (badge) {
    badge.textContent = unread;
    badge.classList.toggle('hidden', unread === 0);
  }
  if (emails.length === 0) {
    list.innerHTML = '<div class="queue-empty">No emails</div>';
    return;
  }
  list.innerHTML = emails.map(e => {
    // timestamp is Unix seconds from the server — convert to ms for Date.
    const date = new Date(e.timestamp * 1000);
    const timeStr = date.toLocaleDateString() + ' ' + date.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
    const preview = e.body.length > 120 ? e.body.substring(0, 120) + '…' : e.body;
    const unreadCls = e.read_on_air ? '' : ' vm-unlistened';
    // Strip a trailing "<addr@host>" from the sender; fall back to the raw value.
    const senderName = e.sender.replace(/<.*>/, '').trim() || e.sender;
    // Sender/subject/preview are user-controlled → escaped. e.id is a
    // server-generated short uuid, safe to inline in the onclick handlers.
    return `<div class="email-item${unreadCls}" data-id="${e.id}">
  <div class="email-header">
    <span class="email-sender">${escapeHtml(senderName)}</span>
    <span class="vm-time">${timeStr}</span>
  </div>
  <div class="email-subject">${escapeHtml(e.subject)}</div>
  <div class="email-preview">${escapeHtml(preview)}</div>
  <div class="vm-actions">
    <button class="vm-btn listen" onclick="viewEmail('${e.id}')">View</button>
    <button class="vm-btn on-air" onclick="playEmailOnAir('${e.id}')">On Air (TTS)</button>
    <button class="vm-btn delete" onclick="deleteEmail('${e.id}')">Del</button>
  </div>
</div>`;
  }).join('');
}
// Escape HTML-special characters by round-tripping through a detached element:
// textContent assignment encodes, innerHTML reads back the escaped markup.
function escapeHtml(str) {
  const scratch = document.createElement('div');
  scratch.textContent = str;
  return scratch.innerHTML;
}
// Show the full email in a blocking alert. Re-fetches the list so the
// displayed body is never a stale render-time copy.
function viewEmail(id) {
  fetch('/api/emails')
    .then((r) => r.json())
    .then((emails) => {
      const match = emails.find((e) => e.id === id);
      if (!match) return;
      alert(`From: ${match.sender}\nSubject: ${match.subject}\n\n${match.body}`);
    });
}
// Ask the backend to TTS-read this email on air, then refresh the list
// (the server flips read_on_air, which updates the badge).
async function playEmailOnAir(id) {
  try {
    await safeFetch(`/api/email/${id}/play-on-air`, { method: 'POST' });
  } catch (err) {
    log('Failed to play email: ' + err.message);
    return;
  }
  log('Reading email on air (TTS)');
  loadEmails();
}
// Delete an email after a confirm() prompt, then refresh the list.
async function deleteEmail(id) {
  const confirmed = confirm('Delete this email?');
  if (!confirmed) return;
  try {
    await safeFetch(`/api/email/${id}`, { method: 'DELETE' });
  } catch (err) {
    log('Failed to delete email: ' + err.message);
    return;
  }
  loadEmails();
}

View File

@@ -31,8 +31,8 @@ load_dotenv(Path(__file__).parent / ".env")
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
RSS_FEED_URL = "https://podcast.macneilmediagroup.com/@LukeAtTheRoost/feed.xml"
EPISODE_CACHE_DIR = Path(__file__).parent / "clips" / ".episode-cache"
WHISPER_MODEL_FAST = "base"
WHISPER_MODEL_QUALITY = "large-v3"
WHISPER_MODEL_FAST = "distil-large-v3"
WHISPER_MODEL_QUALITY = "distil-large-v3"
COVER_ART = Path(__file__).parent / "website" / "images" / "cover.png"
# Fonts
@@ -71,7 +71,7 @@ def _build_whisper_prompt(labeled_transcript: str) -> str:
def transcribe_with_timestamps(audio_path: str, whisper_model: str = None,
labeled_transcript: str = "") -> list[dict]:
"""Transcribe audio with word-level timestamps using faster-whisper.
"""Transcribe audio with word-level timestamps using mlx-whisper (Apple Silicon GPU).
Returns list of segments: [{start, end, text, words: [{word, start, end}]}]
"""
@@ -83,43 +83,51 @@ def transcribe_with_timestamps(audio_path: str, whisper_model: str = None,
return json.load(f)
try:
from faster_whisper import WhisperModel
import mlx_whisper
except ImportError:
print("Error: faster-whisper not installed. Run: pip install faster-whisper")
print("Error: mlx-whisper not installed. Run: pip install mlx-whisper")
sys.exit(1)
MODEL_HF_REPOS = {
"distil-large-v3": "mlx-community/distil-whisper-large-v3",
"large-v3": "mlx-community/whisper-large-v3-mlx",
"medium": "mlx-community/whisper-medium-mlx",
"small": "mlx-community/whisper-small-mlx",
"base": "mlx-community/whisper-base-mlx",
}
hf_repo = MODEL_HF_REPOS.get(model_name, f"mlx-community/whisper-{model_name}-mlx")
initial_prompt = _build_whisper_prompt(labeled_transcript)
print(f" Model: {model_name}")
print(f" Model: {model_name} (MLX GPU)")
if labeled_transcript:
print(f" Prompt: {initial_prompt[:100]}...")
model = WhisperModel(model_name, compute_type="float32")
segments_iter, info = model.transcribe(
result = mlx_whisper.transcribe(
audio_path,
path_or_hf_repo=hf_repo,
language="en",
word_timestamps=True,
initial_prompt=initial_prompt,
language="en",
beam_size=5,
vad_filter=True,
)
segments = []
for seg in segments_iter:
for seg in result.get("segments", []):
words = []
if seg.words:
for w in seg.words:
words.append({
"word": w.word.strip(),
"start": round(w.start, 3),
"end": round(w.end, 3),
})
for w in seg.get("words", []):
words.append({
"word": w["word"].strip(),
"start": round(w["start"], 3),
"end": round(w["end"], 3),
})
segments.append({
"start": round(seg.start, 3),
"end": round(seg.end, 3),
"text": seg.text.strip(),
"start": round(seg["start"], 3),
"end": round(seg["end"], 3),
"text": seg["text"].strip(),
"words": words,
})
print(f" Transcribed {info.duration:.1f}s ({len(segments)} segments)")
duration = segments[-1]["end"] if segments else 0
print(f" Transcribed {duration:.1f}s ({len(segments)} segments)")
with open(cache_path, "w") as f:
json.dump(segments, f)
@@ -131,33 +139,39 @@ def transcribe_with_timestamps(audio_path: str, whisper_model: str = None,
def refine_clip_timestamps(audio_path: str, clips: list[dict],
quality_model: str, labeled_transcript: str = "",
) -> dict[int, list[dict]]:
"""Re-transcribe just the selected clip ranges with a high-quality model.
"""Re-transcribe just the selected clip ranges with mlx-whisper (GPU).
Extracts each clip segment, runs the quality model on it, and returns
refined segments with timestamps mapped back to the original timeline.
refined segments with word-level timestamps mapped back to the original timeline.
Returns: {clip_index: [segments]} keyed by clip index
"""
try:
from faster_whisper import WhisperModel
import mlx_whisper
except ImportError:
print("Error: faster-whisper not installed. Run: pip install faster-whisper")
print("Error: mlx-whisper not installed. Run: pip install mlx-whisper")
sys.exit(1)
initial_prompt = _build_whisper_prompt(labeled_transcript)
print(f" Refinement model: {quality_model}")
MODEL_HF_REPOS = {
"distil-large-v3": "mlx-community/distil-whisper-large-v3",
"large-v3": "mlx-community/whisper-large-v3-mlx",
"medium": "mlx-community/whisper-medium-mlx",
"small": "mlx-community/whisper-small-mlx",
"base": "mlx-community/whisper-base-mlx",
}
hf_repo = MODEL_HF_REPOS.get(quality_model, f"mlx-community/whisper-{quality_model}-mlx")
model = None # Lazy-load so we skip if all cached
print(f" Refinement model: {quality_model} (MLX GPU)")
initial_prompt = _build_whisper_prompt(labeled_transcript)
refined = {}
with tempfile.TemporaryDirectory() as tmp:
for i, clip in enumerate(clips):
# Add padding around clip for context (Whisper does better with some lead-in)
pad = 3.0
seg_start = max(0, clip["start_time"] - pad)
seg_end = clip["end_time"] + pad
# Check cache first
cache_key = f"{Path(audio_path).stem}_clip{i}_{seg_start:.1f}-{seg_end:.1f}"
cache_path = Path(audio_path).parent / f".whisper_refine_{quality_model}_{cache_key}.json"
if cache_path.exists():
@@ -166,7 +180,6 @@ def refine_clip_timestamps(audio_path: str, clips: list[dict],
refined[i] = json.load(f)
continue
# Extract clip segment to temp WAV
seg_path = os.path.join(tmp, f"segment_{i}.wav")
cmd = [
"ffmpeg", "-y", "-ss", str(seg_start), "-t", str(seg_end - seg_start),
@@ -178,39 +191,35 @@ def refine_clip_timestamps(audio_path: str, clips: list[dict],
refined[i] = []
continue
# Lazy-load model on first non-cached clip
if model is None:
model = WhisperModel(quality_model, compute_type="float32")
segments_iter, info = model.transcribe(
mlx_result = mlx_whisper.transcribe(
seg_path,
path_or_hf_repo=hf_repo,
language="en",
word_timestamps=True,
initial_prompt=initial_prompt,
language="en",
beam_size=5,
vad_filter=True,
)
# Collect segments and offset timestamps back to original timeline
segments = []
for seg in segments_iter:
for seg_data in mlx_result.get("segments", []):
text = seg_data["text"].strip()
words = []
if seg.words:
for w in seg.words:
words.append({
"word": w.word.strip(),
"start": round(w.start + seg_start, 3),
"end": round(w.end + seg_start, 3),
})
for w in seg_data.get("words", []):
words.append({
"word": w["word"].strip(),
"start": round(w["start"] + seg_start, 3),
"end": round(w["end"] + seg_start, 3),
})
segments.append({
"start": round(seg.start + seg_start, 3),
"end": round(seg.end + seg_start, 3),
"text": seg.text.strip(),
"start": round(seg_data["start"] + seg_start, 3),
"end": round(seg_data["end"] + seg_start, 3),
"text": text,
"words": words,
})
refined[i] = segments
print(f" Clip {i+1}: Refined {info.duration:.1f}s → {len(segments)} segments")
seg_duration = segments[-1]["end"] - segments[0]["start"] if segments else 0
print(f" Clip {i+1}: Refined {seg_duration:.1f}s → {len(segments)} segments")
with open(cache_path, "w") as f:
json.dump(segments, f)
@@ -694,32 +703,116 @@ def _interpolate_speaker(idx: int, matched: dict, n_words: int) -> str | None:
return None
def polish_clip_words(words: list[dict], labeled_transcript: str = "") -> list[dict]:
    """Use LLM to fix punctuation, capitalization, and misheard words.

    Sends the raw whisper words to an LLM, gets back a corrected version,
    and maps corrections back to the original timed words.

    Args:
        words: timed word dicts ({"word", "start", "end", ...}); mutated in place.
        labeled_transcript: optional speaker-labeled transcript used as context
            for correcting names and mishearings.

    Returns:
        The same `words` list (corrected in place on success, unchanged on
        any failure — this function is strictly best-effort).
    """
    # No words, or no API key configured → nothing to do.
    if not words or not OPENROUTER_API_KEY:
        return words
    raw_text = " ".join(w["word"] for w in words)
    context = ""
    if labeled_transcript:
        # Cap reference context at 3000 chars to keep the prompt small.
        context = f"\nFor reference, here's the speaker-labeled transcript of this section (use it to correct misheard words and names):\n{labeled_transcript[:3000]}\n"
    prompt = f"""Fix this podcast transcript excerpt so it reads as proper sentences. Fix punctuation, capitalization, and obvious misheard words.
RULES:
- Keep the EXACT same number of words in the EXACT same order
- Only change capitalization, punctuation attached to words, and obvious mishearings
- Do NOT add, remove, merge, or reorder words
- Contractions count as one word (don't = 1 word)
- Return ONLY the corrected text, nothing else
{context}
RAW TEXT ({len(words)} words):
{raw_text}"""
    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "Content-Type": "application/json",
            },
            json={
                "model": "anthropic/claude-sonnet-4-5",
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 2048,
                # temperature 0 for deterministic corrections
                "temperature": 0,
            },
            timeout=30,
        )
        if response.status_code != 200:
            print(f"    Polish failed ({response.status_code}), using raw text")
            return words
        polished = response.json()["choices"][0]["message"]["content"].strip()
        polished_words = polished.split()
        # Word counts must match 1:1, otherwise timings can't be mapped back.
        if len(polished_words) != len(words):
            print(f"    Polish word count mismatch ({len(polished_words)} vs {len(words)}), using raw text")
            return words
        changes = 0
        for i, pw in enumerate(polished_words):
            if pw != words[i]["word"]:
                changes += 1
                # Replace only the text; start/end timestamps stay untouched.
                words[i]["word"] = pw
        if changes:
            print(f"    Polished {changes} words")
    except Exception as e:
        # Network/parse failures degrade gracefully to the raw transcript.
        print(f"    Polish error: {e}")
    return words
def group_words_into_lines(words: list[dict], clip_start: float,
clip_duration: float) -> list[dict]:
"""Group words into timed caption lines for rendering.
Splits at speaker changes so each line has a single, correct speaker label.
Returns list of: {start, end, speaker, words: [{word, highlighted}]}
"""
if not words:
return []
# Group words into display lines (5-7 words per line)
raw_lines = []
current_line = []
# First split at speaker boundaries, then group into display lines
speaker_groups = []
current_group = []
current_speaker = words[0].get("speaker", "")
for w in words:
current_line.append(w)
if len(current_line) >= 6 or w["word"].rstrip().endswith(('.', '?', '!', ',')):
if len(current_line) >= 3:
raw_lines.append(current_line)
current_line = []
if current_line:
if raw_lines and len(current_line) < 3:
raw_lines[-1].extend(current_line)
else:
raw_lines.append(current_line)
speaker = w.get("speaker", "")
if speaker and speaker != current_speaker and current_group:
speaker_groups.append((current_speaker, current_group))
current_group = []
current_speaker = speaker
current_group.append(w)
if current_group:
speaker_groups.append((current_speaker, current_group))
# Now group each speaker's words into display lines (5-7 words)
raw_lines = []
for speaker, group_words in speaker_groups:
current_line = []
for w in group_words:
current_line.append(w)
if len(current_line) >= 6 or w["word"].rstrip().endswith(('.', '?', '!', ',')):
if len(current_line) >= 3:
raw_lines.append((speaker, current_line))
current_line = []
if current_line:
if raw_lines and len(current_line) < 3 and raw_lines[-1][0] == speaker:
raw_lines[-1] = (speaker, raw_lines[-1][1] + current_line)
else:
raw_lines.append((speaker, current_line))
lines = []
for line_words in raw_lines:
for speaker, line_words in raw_lines:
line_start = line_words[0]["start"] - clip_start
line_end = line_words[-1]["end"] - clip_start
@@ -733,7 +826,7 @@ def group_words_into_lines(words: list[dict], clip_start: float,
lines.append({
"start": line_start,
"end": line_end,
"speaker": line_words[0].get("speaker", ""),
"speaker": speaker,
"words": line_words,
})
@@ -1334,6 +1427,9 @@ def main():
clip["start_time"], clip["end_time"],
word_source)
# Polish text with LLM (fix punctuation, capitalization, mishearings)
clip_words = polish_clip_words(clip_words, labeled_transcript)
# Group words into timed caption lines
caption_lines = group_words_into_lines(
clip_words, clip["start_time"], duration

238
upload_clips.py Normal file → Executable file
View File

@@ -1,12 +1,11 @@
#!/usr/bin/env python3
"""Upload podcast clips to social media via Postiz (and direct Bluesky via atproto).
"""Upload podcast clips to social media (direct YouTube & Bluesky, Postiz for others).
Usage:
python upload_clips.py clips/episode-12/
python upload_clips.py clips/episode-12/ --clip 1
python upload_clips.py clips/episode-12/ --platforms ig,yt
python upload_clips.py clips/episode-12/ --schedule "2026-02-16T10:00:00"
python upload_clips.py clips/episode-12/ --yes # skip confirmation
python upload_clips.py # interactive: pick episode, clips, platforms
python upload_clips.py clips/episode-12/ # pick clips and platforms interactively
python upload_clips.py clips/episode-12/ --clip 1 --platforms ig,yt
python upload_clips.py clips/episode-12/ --yes # skip all prompts, upload everything
"""
import argparse
@@ -27,6 +26,9 @@ POSTIZ_URL = os.getenv("POSTIZ_URL", "https://social.lukeattheroost.com")
BSKY_HANDLE = os.getenv("BSKY_HANDLE", "lukeattheroost.bsky.social")
BSKY_APP_PASSWORD = os.getenv("BSKY_APP_PASSWORD")
YT_CLIENT_SECRETS = Path(__file__).parent / "youtube_client_secrets.json"
YT_TOKEN_FILE = Path(__file__).parent / "youtube_token.json"
PLATFORM_ALIASES = {
"ig": "instagram", "insta": "instagram", "instagram": "instagram",
"yt": "youtube", "youtube": "youtube",
@@ -214,6 +216,106 @@ def post_to_bluesky(clip: dict, clip_file: Path) -> bool:
return True
def get_youtube_service():
    """Authenticate with the YouTube Data API.

    First run opens a browser for the OAuth consent flow; afterwards the
    token saved in YT_TOKEN_FILE is reused and silently refreshed when
    expired.

    Returns:
        An authenticated googleapiclient service object, or None when
        youtube_client_secrets.json is missing and no saved token exists.

    Fix: the token file is rewritten after BOTH the refresh path and the
    browser-flow path (the save sits after the if/else), so a refreshed
    access token is persisted instead of being refreshed again on every run.
    """
    from google.oauth2.credentials import Credentials
    from google_auth_oauthlib.flow import InstalledAppFlow
    from google.auth.transport.requests import Request
    from googleapiclient.discovery import build as yt_build

    scopes = ["https://www.googleapis.com/auth/youtube.upload"]
    creds = None
    if YT_TOKEN_FILE.exists():
        creds = Credentials.from_authorized_user_file(str(YT_TOKEN_FILE), scopes)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            if not YT_CLIENT_SECRETS.exists():
                print("  Error: youtube_client_secrets.json not found")
                print("  Download OAuth2 Desktop App credentials from Google Cloud Console")
                return None
            flow = InstalledAppFlow.from_client_secrets_file(str(YT_CLIENT_SECRETS), scopes)
            creds = flow.run_local_server(port=8090)
        # Persist new OR refreshed credentials so later runs skip the browser.
        with open(YT_TOKEN_FILE, "w") as f:
            f.write(creds.to_json())
    return yt_build("youtube", "v3", credentials=creds)
def post_to_youtube(clip: dict, clip_file: Path) -> bool:
    """Upload a clip directly to YouTube Shorts via the Data API.

    Ensures "#Shorts" appears in the title, description, and tags, then
    performs a resumable chunked upload with exponential-backoff retries on
    transient 5xx errors (max 5 attempts).

    Returns True on success, False on auth or upload failure.
    """
    import time
    import random
    from googleapiclient.http import MediaFileUpload
    from googleapiclient.errors import HttpError

    service = get_youtube_service()
    if not service:
        return False

    # YouTube treats "#Shorts" in metadata as the Shorts signal.
    title = clip["title"]
    if "#Shorts" not in title:
        title = f"{title} #Shorts"
    description = build_content(clip, "youtube")
    if "#Shorts" not in description:
        description += "\n\n#Shorts"
    tags = [h.lstrip("#") for h in clip.get("hashtags", [])]
    if "Shorts" not in tags:
        tags.insert(0, "Shorts")

    body = {
        "snippet": {
            "title": title[:100],  # API caps titles at 100 chars
            "description": description,
            "tags": tags,
            "categoryId": "24",  # Entertainment
        },
        "status": {
            "privacyStatus": "public",
            "selfDeclaredMadeForKids": False,
        },
    }
    media = MediaFileUpload(
        str(clip_file),
        mimetype="video/mp4",
        chunksize=256 * 1024,
        resumable=True,
    )
    request = service.videos().insert(part="snippet,status", body=body, media_body=media)
    file_size = clip_file.stat().st_size / 1_000_000
    print(f"    Uploading video ({file_size:.1f} MB)...")

    response = None
    attempts = 0
    while response is None:
        try:
            status, response = request.next_chunk()
            if status:
                print(f"    Upload {int(status.progress() * 100)}%...")
        except HttpError as e:
            transient = e.resp.status in (500, 502, 503, 504)
            if not transient or attempts >= 5:
                print(f"    YouTube API error: {e}")
                return False
            attempts += 1
            wait = random.random() * (2 ** attempts)
            print(f"    Retrying in {wait:.1f}s...")
            time.sleep(wait)

    video_id = response["id"]
    print(f"    https://youtube.com/shorts/{video_id}")
    return True
def create_post(integration_id: str, content: str, media: dict,
settings: dict, schedule: str | None = None) -> dict:
from datetime import datetime, timezone
@@ -253,7 +355,7 @@ def create_post(integration_id: str, content: str, media: dict,
def main():
valid_names = sorted(set(PLATFORM_ALIASES.keys()))
parser = argparse.ArgumentParser(description="Upload podcast clips to social media via Postiz")
parser.add_argument("clips_dir", help="Path to clips directory (e.g. clips/episode-12/)")
parser.add_argument("clips_dir", nargs="?", help="Path to clips directory (e.g. clips/episode-12/). If omitted, shows a picker.")
parser.add_argument("--clip", "-c", type=int, help="Upload only clip N (1-indexed)")
parser.add_argument("--platforms", "-p",
help=f"Comma-separated platforms ({','.join(ALL_PLATFORMS)}). Default: all")
@@ -266,6 +368,75 @@ def main():
print("Error: POSTIZ_API_KEY not set in .env")
sys.exit(1)
# Resolve clips directory — pick interactively if not provided
if args.clips_dir:
clips_dir = Path(args.clips_dir).expanduser().resolve()
else:
clips_root = Path(__file__).parent / "clips"
episode_dirs = sorted(
[d for d in clips_root.iterdir()
if d.is_dir() and not d.name.startswith(".") and (d / "clips-metadata.json").exists()],
key=lambda d: d.name,
)
if not episode_dirs:
print("No clip directories found in clips/. Run make_clips.py first.")
sys.exit(1)
print("\nAvailable episodes:\n")
for i, d in enumerate(episode_dirs):
with open(d / "clips-metadata.json") as f:
meta = json.load(f)
print(f" {i+1}. {d.name} ({len(meta)} clip{'s' if len(meta) != 1 else ''})")
print()
while True:
try:
choice = input("Which episode? ").strip()
idx = int(choice) - 1
if 0 <= idx < len(episode_dirs):
clips_dir = episode_dirs[idx]
break
print(f" Enter 1-{len(episode_dirs)}")
except (ValueError, EOFError):
print(f" Enter an episode number")
metadata_path = clips_dir / "clips-metadata.json"
if not metadata_path.exists():
print(f"Error: No clips-metadata.json found in {clips_dir}")
print("Run make_clips.py first to generate clips and metadata.")
sys.exit(1)
with open(metadata_path) as f:
clips = json.load(f)
# Pick clips
if args.clip:
if args.clip < 1 or args.clip > len(clips):
print(f"Error: Clip {args.clip} not found (have {len(clips)} clips)")
sys.exit(1)
clips = [clips[args.clip - 1]]
elif not args.yes:
print(f"\nFound {len(clips)} clip(s):\n")
for i, clip in enumerate(clips):
desc = clip.get('description', clip.get('caption_text', ''))
if len(desc) > 70:
desc = desc[:desc.rfind(' ', 0, 70)] + '...'
print(f" {i+1}. \"{clip['title']}\" ({clip['duration']:.0f}s)")
print(f" {desc}")
print(f"\n a. All clips")
print()
while True:
choice = input("Which clips? (e.g. 1,3 or a for all): ").strip().lower()
if choice in ('a', 'all'):
break
try:
indices = [int(x.strip()) for x in choice.split(",")]
if all(1 <= x <= len(clips) for x in indices):
clips = [clips[x - 1] for x in indices]
break
print(f" Invalid selection. Enter 1-{len(clips)}, comma-separated, or 'a' for all.")
except (ValueError, EOFError):
print(f" Enter clip numbers (e.g. 1,3) or 'a' for all")
# Pick platforms
if args.platforms:
requested = []
for p in args.platforms.split(","):
@@ -276,28 +447,29 @@ def main():
sys.exit(1)
requested.append(PLATFORM_ALIASES[p])
target_platforms = list(dict.fromkeys(requested))
elif not args.yes:
print(f"\nPlatforms:\n")
for i, p in enumerate(ALL_PLATFORMS):
print(f" {i+1}. {PLATFORM_DISPLAY[p]}")
print(f"\n a. All platforms (default)")
print()
choice = input("Which platforms? (e.g. 1,3,5 or a for all) [a]: ").strip().lower()
if choice and choice not in ('a', 'all'):
try:
indices = [int(x.strip()) for x in choice.split(",")]
target_platforms = [ALL_PLATFORMS[x - 1] for x in indices if 1 <= x <= len(ALL_PLATFORMS)]
if not target_platforms:
target_platforms = ALL_PLATFORMS[:]
except (ValueError, IndexError):
target_platforms = ALL_PLATFORMS[:]
else:
target_platforms = ALL_PLATFORMS[:]
else:
target_platforms = ALL_PLATFORMS[:]
clips_dir = Path(args.clips_dir).expanduser().resolve()
metadata_path = clips_dir / "clips-metadata.json"
if not metadata_path.exists():
print(f"Error: No clips-metadata.json found in {clips_dir}")
print("Run make_clips.py first to generate clips and metadata.")
sys.exit(1)
with open(metadata_path) as f:
clips = json.load(f)
if args.clip:
if args.clip < 1 or args.clip > len(clips):
print(f"Error: Clip {args.clip} not found (have {len(clips)} clips)")
sys.exit(1)
clips = [clips[args.clip - 1]]
DIRECT_PLATFORMS = {"bluesky", "youtube"}
needs_postiz = not args.dry_run and any(
p != "bluesky" for p in target_platforms)
p not in DIRECT_PLATFORMS for p in target_platforms)
if needs_postiz:
print("Fetching connected accounts from Postiz...")
integrations = fetch_integrations()
@@ -312,6 +484,12 @@ def main():
else:
print("Warning: BSKY_APP_PASSWORD not set in .env, skipping Bluesky")
continue
if platform == "youtube":
if YT_CLIENT_SECRETS.exists() or YT_TOKEN_FILE.exists() or args.dry_run:
active_platforms[platform] = {"name": "YouTube Shorts", "_direct": True}
else:
print("Warning: youtube_client_secrets.json not found, skipping YouTube")
continue
if args.dry_run:
active_platforms[platform] = {"name": PLATFORM_DISPLAY[platform]}
continue
@@ -384,6 +562,16 @@ def main():
else:
print(f" {display}: Failed")
if "youtube" in active_platforms:
print(f" Posting to YouTube Shorts (direct)...")
try:
if post_to_youtube(clip, clip_file):
print(f" YouTube: Posted!")
else:
print(f" YouTube: Failed")
except Exception as e:
print(f" YouTube: Failed — {e}")
if "bluesky" in active_platforms:
print(f" Posting to Bluesky (direct)...")
try:

View File

@@ -47,7 +47,7 @@ a:hover {
/* Hero */
.hero {
padding: 3rem 1.5rem 2rem;
padding: 3rem 1.5rem 2.5rem;
max-width: 900px;
margin: 0 auto;
text-align: center;
@@ -57,14 +57,14 @@ a:hover {
display: flex;
flex-direction: column;
align-items: center;
gap: 2rem;
gap: 1.5rem;
}
.cover-art {
width: 220px;
height: 220px;
width: 260px;
height: 260px;
border-radius: var(--radius);
box-shadow: 0 8px 32px rgba(232, 121, 29, 0.35);
box-shadow: 0 8px 32px rgba(232, 121, 29, 0.25);
object-fit: cover;
}
@@ -72,31 +72,32 @@ a:hover {
display: flex;
flex-direction: column;
align-items: center;
gap: 0.75rem;
gap: 0.5rem;
}
.hero h1 {
font-size: 2.5rem;
font-size: 2.8rem;
font-weight: 800;
letter-spacing: -0.02em;
}
.tagline {
font-size: 1.15rem;
font-size: 1.2rem;
color: var(--text-muted);
max-width: 400px;
max-width: 500px;
line-height: 1.5;
}
.phone {
display: flex;
align-items: center;
justify-content: center;
gap: 0.6rem;
margin-top: 0.5rem;
gap: 0.5rem;
margin-top: 0.25rem;
}
.phone-inline {
font-size: 0.95rem;
font-size: 1rem;
color: var(--text-muted);
}
@@ -110,12 +111,12 @@ a:hover {
.on-air-badge {
display: none;
align-items: center;
gap: 0.4rem;
gap: 0.35rem;
background: var(--accent-red);
color: #fff;
padding: 0.25rem 0.75rem;
padding: 0.2rem 0.6rem;
border-radius: 50px;
font-size: 0.7rem;
font-size: 0.65rem;
font-weight: 800;
letter-spacing: 0.12em;
text-transform: uppercase;
@@ -128,8 +129,8 @@ a:hover {
}
.on-air-dot {
width: 8px;
height: 8px;
width: 7px;
height: 7px;
border-radius: 50%;
background: #fff;
animation: on-air-blink 1s step-end infinite;
@@ -149,11 +150,11 @@ a:hover {
.off-air-badge {
display: inline-flex;
align-items: center;
background: #444;
background: rgba(255, 255, 255, 0.08);
color: var(--text-muted);
padding: 0.25rem 0.75rem;
padding: 0.2rem 0.6rem;
border-radius: 50px;
font-size: 0.7rem;
font-size: 0.65rem;
font-weight: 700;
letter-spacing: 0.1em;
text-transform: uppercase;
@@ -169,52 +170,58 @@ a:hover {
text-shadow: 0 0 16px rgba(204, 34, 34, 0.35);
}
/* Subscribe buttons — primary listen platforms */
/* Subscribe — compact inline text links */
.subscribe-row {
display: flex;
flex-direction: column;
align-items: center;
gap: 0.6rem;
margin-top: 1.5rem;
justify-content: center;
gap: 0.5rem;
margin-top: 1rem;
}
.subscribe-label {
font-size: 0.75rem;
font-size: 0.8rem;
color: var(--text-muted);
text-transform: uppercase;
letter-spacing: 0.15em;
letter-spacing: 0.12em;
opacity: 0.6;
}
.subscribe-buttons {
display: flex;
flex-wrap: wrap;
justify-content: center;
gap: 0.5rem;
align-items: center;
gap: 0.15rem;
}
.subscribe-btn {
display: inline-flex;
align-items: center;
gap: 0.4rem;
padding: 0.45rem 1rem;
border-radius: 50px;
font-size: 0.8rem;
gap: 0.35rem;
padding: 0.4rem 0.75rem;
border-radius: 6px;
font-size: 0.9rem;
font-weight: 600;
color: var(--text);
color: var(--text-muted);
background: transparent;
border: 1px solid var(--text-muted);
transition: border-color 0.2s, color 0.2s;
border: none;
transition: color 0.2s;
}
.subscribe-btn:hover {
border-color: var(--accent);
color: var(--accent);
}
.subscribe-btn svg {
width: 14px;
height: 14px;
width: 15px;
height: 15px;
flex-shrink: 0;
opacity: 0.6;
}
.subscribe-btn:hover svg {
opacity: 1;
}
/* Secondary links — How It Works, Discord, Support */
@@ -224,23 +231,35 @@ a:hover {
justify-content: center;
align-items: center;
gap: 0.5rem;
margin-top: 0.75rem;
margin-top: 0.25rem;
}
.secondary-link {
font-size: 0.8rem;
font-size: 0.85rem;
color: var(--text-muted);
transition: color 0.2s;
opacity: 0.6;
transition: color 0.2s, opacity 0.2s;
}
.secondary-link:hover {
color: var(--accent);
opacity: 1;
}
.secondary-sep {
color: var(--text-muted);
opacity: 0.4;
font-size: 0.8rem;
opacity: 0.3;
font-size: 0.85rem;
}
.support-link {
color: var(--accent);
opacity: 1;
font-weight: 600;
}
.support-link:hover {
color: var(--accent-hover);
}
/* Episodes */
@@ -1201,13 +1220,15 @@ a:hover {
/* Desktop */
@media (min-width: 768px) {
.hero {
padding: 4rem 2rem 2.5rem;
padding: 3.5rem 2rem 2.5rem;
max-width: 1000px;
}
.hero-inner {
flex-direction: row;
text-align: left;
gap: 3rem;
gap: 2.5rem;
align-items: center;
}
.hero-info {
@@ -1215,8 +1236,17 @@ a:hover {
}
.cover-art {
width: 260px;
height: 260px;
width: 280px;
height: 280px;
flex-shrink: 0;
}
.hero h1 {
font-size: 2.8rem;
}
.phone {
justify-content: flex-start;
}
.subscribe-row {

View File

@@ -109,9 +109,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div>
</div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p>
<p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@lukeattheroost.com">luke@lukeattheroost.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer>
<!-- Sticky Audio Player -->

View File

@@ -69,6 +69,117 @@
<p class="page-subtitle">Every caller on the show is a one-of-a-kind character — generated in real time by a custom-built AI system. Here's a peek behind the curtain.</p>
</section>
<!-- Steps -->
<section class="hiw-section">
<h2>The Anatomy of an AI Caller</h2>
<div class="hiw-steps">
<div class="hiw-step">
<div class="hiw-step-number">1</div>
<div class="hiw-step-content">
<h3>A Person Is Born</h3>
<p>Every caller starts as a blank slate. The system generates a complete identity: name, age, job, hometown, and personality. Each caller gets a unique speaking style — some ramble, some are blunt, some deflect with humor. They have relationships, vehicles, strong food opinions, nostalgic memories, and reasons for being up this late. They know what they were watching on TV, what errand they ran today, and what song was on the radio before they called.</p>
<p>Some callers become regulars. The system tracks returning callers across episodes — they remember past conversations, reference things they talked about before, and their stories evolve over time. You'll hear Carla update you on her divorce, or Carl check in about his gambling recovery. They're not reset between shows.</p>
<p>And some callers are drunk, high, or flat-out unhinged. They'll call with conspiracy theories about pigeons being government drones, existential crises about whether fish know they're wet, or to confess they accidentally set their kitchen on fire trying to make grilled cheese at 3 AM.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Unique Names</span>
<span class="hiw-detail-value">320</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Personality Layers</span>
<span class="hiw-detail-value">189+</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Towns with Real Knowledge</span>
<span class="hiw-detail-value">20</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Returning Regulars</span>
<span class="hiw-detail-value">12 callers</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">2</div>
<div class="hiw-step-content">
<h3>They Know Their World</h3>
<p>Callers know real facts about where they live — the restaurants, the highways, the local gossip. When a caller says they're from Lordsburg, they actually know about the Shakespeare ghost town and the drive to Deming. They know the current weather outside their window, what day of the week it is, whether it's monsoon season or chile harvest. They have strong opinions about where to get the best green chile and get nostalgic about how their town used to be. The system also pulls in real-time news so callers can reference things that actually happened today.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">3</div>
<div class="hiw-step-content">
<h3>They Have a Reason to Call</h3>
<p>Some callers have a problem — a fight with a neighbor, a situation at work, something weighing on them at 2 AM. Others call to geek out about Severance, argue about poker strategy, or share something they read about quantum physics. The system draws from over 570 discussion topics across dozens of categories and more than 1,400 life scenarios. Every caller has a purpose, not just a script.</p>
<div class="hiw-split-stat">
<div class="hiw-stat">
<span class="hiw-stat-number">70%</span>
<span class="hiw-stat-label">Need advice</span>
</div>
<div class="hiw-stat">
<span class="hiw-stat-number">30%</span>
<span class="hiw-stat-label">Want to talk about something</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">4</div>
<div class="hiw-step-content">
<h3>The Conversation Is Real</h3>
<p>Luke talks to each caller using push-to-talk, just like a real radio show. His voice is transcribed in real time, sent to an AI that responds in character, and then converted to speech using a voice engine — all in a few seconds. The AI doesn't just answer questions; it reacts, gets emotional, goes on tangents, and remembers what was said earlier in the show. Callers even react to previous callers — "Hey Luke, I heard that guy Tony earlier and I got to say, he's full of it." It makes the show feel like a living community, not isolated calls.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">5</div>
<div class="hiw-step-content">
<h3>Real Callers Call In Too</h3>
<p>When you dial 208-439-LUKE, your call goes into a live queue. Luke sees you waiting and can take your call right from the control room. Your voice streams in real time — no pre-recording, no delay. You're live on the show, talking to Luke, and the AI callers might even react to what you said. And if Luke isn't live, you can leave a voicemail — it gets transcribed and may get played on a future episode.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">6</div>
<div class="hiw-step-content">
<h3>Listener Emails</h3>
<p>Listeners can send emails to <a href="mailto:submissions@lukeattheroost.com" style="color:var(--accent)">submissions@lukeattheroost.com</a> and have them read on the show. A background poller checks for new messages every 30 seconds — they show up in the control room as soon as they arrive. Luke can read them himself on the mic, or hit a button to have an AI voice read them aloud on the caller channel. It's like a call-in show meets a letters segment — listeners who can't call in can still be part of the conversation.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">7</div>
<div class="hiw-step-content">
<h3>The Control Room</h3>
<p>The entire show runs through a custom-built control panel. Luke manages callers, plays music and sound effects, runs ads, monitors the call queue, and controls everything from one screen. Audio is routed across multiple channels simultaneously — caller voices, music, sound effects, and live phone audio all on separate tracks. The website shows a live on-air indicator so listeners know when to call in.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Audio Channels</span>
<span class="hiw-detail-value">5 independent</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Caller Slots</span>
<span class="hiw-detail-value">10 per session</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Phone System</span>
<span class="hiw-detail-value">VoIP + WebSocket</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Live Status</span>
<span class="hiw-detail-value">Real-time CDN</span>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Overview -->
<section class="hiw-section">
<div class="hiw-card hiw-hero-card">
@@ -100,6 +211,12 @@
</div>
<span>Voicemails</span>
</div>
<div class="diagram-box diagram-accent">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M4 4h16c1.1 0 2 .9 2 2v12c0 1.1-.9 2-2 2H4c-1.1 0-2-.9-2-2V6c0-1.1.9-2 2-2z"/><polyline points="22,6 12,13 2,6"/></svg>
</div>
<span>Listener Emails</span>
</div>
</div>
<div class="diagram-arrow">&#8595;</div>
<!-- Row 2: Control Room -->
@@ -211,6 +328,12 @@
</div>
<span>Social Clips</span>
</div>
<div class="diagram-box">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M22 12h-4l-3 9L9 3l-3 9H2"/></svg>
</div>
<span>Monitoring</span>
</div>
</div>
<div class="diagram-arrow">&#8595;</div>
<!-- Row 7: Distribution -->
@@ -281,115 +404,13 @@
</div>
</section>
<!-- Steps -->
<section class="hiw-section">
<h2>The Anatomy of an AI Caller</h2>
<div class="hiw-steps">
<div class="hiw-step">
<div class="hiw-step-number">1</div>
<div class="hiw-step-content">
<h3>A Person Is Born</h3>
<p>Every caller starts as a blank slate. The system generates a complete identity: name, age, job, hometown, and personality. Each caller gets a unique speaking style — some ramble, some are blunt, some deflect with humor. They have relationships, vehicles, strong food opinions, nostalgic memories, and reasons for being up this late. They know what they were watching on TV, what errand they ran today, and what song was on the radio before they called.</p>
<p>Some callers become regulars. The system tracks returning callers across episodes — they remember past conversations, reference things they talked about before, and their stories evolve over time. You'll hear Carla update you on her divorce, or Carl check in about his gambling recovery. They're not reset between shows.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Unique Names</span>
<span class="hiw-detail-value">160 names</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Personality Layers</span>
<span class="hiw-detail-value">30+</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Towns with Real Knowledge</span>
<span class="hiw-detail-value">32</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Returning Regulars</span>
<span class="hiw-detail-value">12+ callers</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">2</div>
<div class="hiw-step-content">
<h3>They Know Their World</h3>
<p>Callers know real facts about where they live — the restaurants, the highways, the local gossip. When a caller says they're from Lordsburg, they actually know about the Shakespeare ghost town and the drive to Deming. They know the current weather outside their window, what day of the week it is, whether it's monsoon season or chile harvest. They have strong opinions about where to get the best green chile and get nostalgic about how their town used to be. The system also pulls in real-time news so callers can reference things that actually happened today.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">3</div>
<div class="hiw-step-content">
<h3>They Have a Reason to Call</h3>
<p>Some callers have a problem — a fight with a neighbor, a situation at work, something weighing on them at 2 AM. Others call to geek out about Severance, argue about poker strategy, or share something they read about quantum physics. Every caller has a purpose, not just a script.</p>
<div class="hiw-split-stat">
<div class="hiw-stat">
<span class="hiw-stat-number">70%</span>
<span class="hiw-stat-label">Need advice</span>
</div>
<div class="hiw-stat">
<span class="hiw-stat-number">30%</span>
<span class="hiw-stat-label">Want to talk about something</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">4</div>
<div class="hiw-step-content">
<h3>The Conversation Is Real</h3>
<p>Luke talks to each caller using push-to-talk, just like a real radio show. His voice is transcribed in real time, sent to an AI that responds in character, and then converted to speech using a voice engine — all in a few seconds. The AI doesn't just answer questions; it reacts, gets emotional, goes on tangents, and remembers what was said earlier in the show. Callers even react to previous callers — "Hey Luke, I heard that guy Tony earlier and I got to say, he's full of it." It makes the show feel like a living community, not isolated calls.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">5</div>
<div class="hiw-step-content">
<h3>Real Callers Call In Too</h3>
<p>When you dial 208-439-LUKE, your call goes into a live queue. Luke sees you waiting and can take your call right from the control room. Your voice streams in real time — no pre-recording, no delay. You're live on the show, talking to Luke, and the AI callers might even react to what you said.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">6</div>
<div class="hiw-step-content">
<h3>The Control Room</h3>
<p>The entire show runs through a custom-built control panel. Luke manages callers, plays music and sound effects, runs ads, monitors the call queue, and controls everything from one screen. Audio is routed across multiple channels simultaneously — caller voices, music, sound effects, and live phone audio all on separate tracks. The website shows a live on-air indicator so listeners know when to call in.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Audio Channels</span>
<span class="hiw-detail-value">5 independent</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Caller Slots</span>
<span class="hiw-detail-value">10 per session</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Phone System</span>
<span class="hiw-detail-value">VoIP + WebSocket</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Live Status</span>
<span class="hiw-detail-value">Real-time CDN</span>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Post-Production Pipeline -->
<section class="hiw-section">
<h2>From Live Show to Podcast</h2>
<div class="hiw-steps">
<div class="hiw-step">
<div class="hiw-step-number">7</div>
<div class="hiw-step-number">8</div>
<div class="hiw-step-content">
<h3>Multi-Stem Recording</h3>
<p>During every show, the system records five separate audio stems simultaneously: host microphone, AI caller voices, music, sound effects, and ads. Each stem is captured as an independent WAV file with sample-accurate alignment. This gives full control over the final mix — like having a recording studio's multitrack session, not just a flat recording.</p>
@@ -415,7 +436,7 @@
</div>
<div class="hiw-step">
<div class="hiw-step-number">8</div>
<div class="hiw-step-number">9</div>
<div class="hiw-step-content">
<h3>Post-Production Pipeline</h3>
<p>Once the show ends, a 15-step automated pipeline processes the raw stems into a broadcast-ready episode. Ads and sound effects are hard-limited to prevent clipping. The host mic gets a high-pass filter, de-essing, and breath reduction. Voice tracks are compressed — the host gets aggressive spoken-word compression for consistent levels, callers get telephone EQ to sound like real phone calls. All stems are level-matched, music is ducked under dialog and muted during ads, then everything is mixed to stereo with panning and width. A bus compressor glues the final mix together before silence trimming, fades, and EBU R128 loudness normalization.</p>
@@ -441,14 +462,14 @@
</div>
<div class="hiw-step">
<div class="hiw-step-number">9</div>
<div class="hiw-step-number">10</div>
<div class="hiw-step-content">
<h3>Automated Publishing</h3>
<p>A single command takes a finished episode and handles everything: the audio is transcribed using speech recognition to generate full-text transcripts, then an LLM analyzes the transcript to write the episode title, description, and chapter markers with timestamps. The episode is uploaded to the podcast server, chapters and transcripts are attached to the metadata, and all media is synced to a global CDN so listeners everywhere get fast downloads.</p>
<p>A single command takes a finished episode and handles everything: the audio is transcribed using MLX Whisper running on Apple Silicon GPU to generate full-text transcripts, then an LLM analyzes the transcript to write the episode title, description, and chapter markers with timestamps. The episode is uploaded to the podcast server, chapters and transcripts are attached to the metadata, and all media is synced to a global CDN so listeners everywhere get fast downloads.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Transcription</span>
<span class="hiw-detail-value">Whisper AI</span>
<span class="hiw-detail-value">MLX Whisper (GPU)</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Metadata</span>
@@ -467,10 +488,10 @@
</div>
<div class="hiw-step">
<div class="hiw-step-number">10</div>
<div class="hiw-step-number">11</div>
<div class="hiw-step-content">
<h3>Automated Social Clips</h3>
<p>No manual editing, no scheduling tools. After each episode, an LLM reads the full transcript and picks the best moments — funny exchanges, wild confessions, heated debates. Each clip is automatically extracted, captioned with word-level timing, and rendered as a vertical video with the show's branding. A second LLM pass writes platform-specific descriptions and hashtags. Then a single script blasts every clip to Instagram Reels, YouTube Shorts, Facebook, Bluesky, and Mastodon simultaneously — six platforms, zero manual work.</p>
<p>No manual editing, no scheduling tools. After each episode, an LLM reads the full transcript and picks the best moments — funny exchanges, wild confessions, heated debates. Each clip is automatically extracted, transcribed with word-level timestamps, then polished by a second LLM pass that fixes punctuation, capitalization, and misheard words while preserving timing. The clips are rendered as vertical video with speaker-labeled captions and the show's branding. A third LLM writes platform-specific descriptions and hashtags. Then clips are uploaded directly to YouTube Shorts and Bluesky via their APIs, and pushed to Instagram Reels, Facebook, and Mastodon — six platforms, zero manual work.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Human Effort</span>
@@ -482,7 +503,7 @@
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Captions</span>
<span class="hiw-detail-value">Word-level sync</span>
<span class="hiw-detail-value">LLM-polished</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Simultaneous Push</span>
@@ -493,7 +514,7 @@
</div>
<div class="hiw-step">
<div class="hiw-step-number">11</div>
<div class="hiw-step-number">12</div>
<div class="hiw-step-content">
<h3>Global Distribution</h3>
<p>Episodes are served through a CDN edge network for fast, reliable playback worldwide. The RSS feed is automatically updated and picked up by Spotify, Apple Podcasts, YouTube, and every other podcast app. The website pulls the live feed to show episodes with embedded playback, full transcripts, and chapter navigation — all served through Cloudflare with edge caching. From recording to available on every platform, the whole pipeline is automated end-to-end.</p>
@@ -576,7 +597,7 @@
<div class="hiw-cta-phone">
Or call in live: <strong>208-439-LUKE</strong>
</div>
<a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="hiw-cta-support">Support the show on Ko-fi</a>
<a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="hiw-cta-support">Support the Show</a>
</section>
<!-- Footer -->
@@ -602,9 +623,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div>
</div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p>
<p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@lukeattheroost.com">luke@lukeattheroost.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer>
</body>

View File

@@ -117,12 +117,20 @@
<span class="secondary-sep">&middot;</span>
<a href="https://discord.gg/5CnQZxDM" target="_blank" rel="noopener" class="secondary-link">Discord</a>
<span class="secondary-sep">&middot;</span>
<a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="secondary-link">Support the Show</a>
<a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="secondary-link support-link">Support the Show</a>
</div>
</div>
</div>
</section>
<!-- Episodes -->
<section class="episodes-section">
<h2>Episodes</h2>
<div class="episodes-list" id="episodes-list">
<div class="episodes-loading">Loading episodes...</div>
</div>
</section>
<!-- Testimonials -->
<section class="testimonials-section">
<h2>What Callers Are Saying</h2>
@@ -197,14 +205,6 @@
<div class="testimonials-dots" id="testimonials-dots"></div>
</section>
<!-- Episodes -->
<section class="episodes-section">
<h2>Episodes</h2>
<div class="episodes-list" id="episodes-list">
<div class="episodes-loading">Loading episodes...</div>
</div>
</section>
<!-- Footer -->
<footer class="footer">
<div class="footer-links">
@@ -228,9 +228,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div>
</div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p>
<p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@lukeattheroost.com">luke@lukeattheroost.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer>
<!-- Sticky Audio Player -->

View File

@@ -19,4 +19,4 @@ Luke at the Roost is a late-night call-in radio show hosted by Luke MacNeil, bro
- Host: Luke MacNeil
- Genre: Comedy
- Format: Live call-in radio show
- Contact: luke@macneilmediagroup.com
- Contact: luke@lukeattheroost.com

View File

@@ -71,14 +71,14 @@
<p>Our content is rated explicit and is not directed at children under 13. We do not knowingly collect personal information from children.</p>
<h2>Your Rights</h2>
<p>If you have questions about your data or want to request removal of your voice from a published episode, contact us at <a href="mailto:luke@macneilmediagroup.com" style="color: var(--accent, #d4a44a);">luke@macneilmediagroup.com</a>.</p>
<p>If you have questions about your data or want to request removal of your voice from a published episode, contact us at <a href="mailto:luke@lukeattheroost.com" style="color: var(--accent, #d4a44a);">luke@lukeattheroost.com</a>.</p>
<h2>Changes</h2>
<p>We may update this policy from time to time. Changes will be posted on this page with an updated date.</p>
<h2>Contact</h2>
<p>MacNeil Media Group<br>
Email: <a href="mailto:luke@macneilmediagroup.com" style="color: var(--accent, #d4a44a);">luke@macneilmediagroup.com</a></p>
Email: <a href="mailto:luke@lukeattheroost.com" style="color: var(--accent, #d4a44a);">luke@lukeattheroost.com</a></p>
</div>
</section>
@@ -106,9 +106,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div>
</div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p>
<p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@lukeattheroost.com">luke@lukeattheroost.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer>
</body>

View File

@@ -78,9 +78,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div>
</div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p>
<p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@lukeattheroost.com">luke@lukeattheroost.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer>
<script>