UI cleanup, Devon overhaul, bug fixes, publish ep36

- Fix Devon double messages, add conversation persistence, voice-to-Devon when no caller
- Devon personality: weird/lovable intern on first day, handles name misspellings
- Fix caller gender/avatar mismatch (avatar seed includes gender)
- Reserve Sebastian voice for Silas, ban "eating at me" phrase harder
- Callers now hear Devon's commentary in conversation context
- CSS cleanup: expand compressed blocks, remove inline styles, fix Devon color to warm tawny
- Reaper silence threshold 7s → 6s
- Publish episode 36

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-14 16:42:21 -06:00
parent 6d4e490283
commit 3329cf9ac2
11 changed files with 2300 additions and 187 deletions

View File

@@ -30,6 +30,29 @@ from .services.stem_recorder import StemRecorder
from .services.news import news_service, extract_keywords, STOP_WORDS
from .services.regulars import regular_caller_service
from .services.intern import intern_service
from .services.avatars import avatar_service
# --- Structured Caller Background (must be defined before functions that use it) ---
@dataclass
class CallerBackground:
    """Structured profile for one AI caller, consumed by the prompt builders.

    Built either heuristically (generate_caller_background) or via the LLM
    path (_generate_caller_background_llm); serialized with asdict() when a
    caller is promoted to a regular.
    """
    name: str
    age: int
    gender: str
    job: str
    # Home town/area; None when the caller has no specific location.
    location: str | None
    reason_for_calling: str
    # Which caller pool this background was drawn from.
    pool_name: str
    communication_style: str
    energy_level: str  # low / medium / high / very_high
    emotional_state: str  # nervous, excited, angry, vulnerable, calm, etc.
    signature_detail: str  # The memorable thing about them
    situation_summary: str  # 1-sentence summary for other callers to reference
    natural_description: str  # 3-5 sentence prose for the prompt
    # Interests/quirks/memories seeded into the LLM prompt.
    seeds: list[str] = field(default_factory=list)
    verbal_fluency: str = "medium"
    # Where they physically are right now; "" means the caller never
    # mentions it (only ~40% of callers get a calling_from seed).
    calling_from: str = ""
app = FastAPI(title="AI Radio Show")
@@ -123,7 +146,7 @@ ELEVENLABS_MALE_VOICES.append("SAz9YHcvj6GT2YYXdXww") # River - Neutral
ELEVENLABS_FEMALE_VOICES.append("SAz9YHcvj6GT2YYXdXww") # River - Neutral
# Voices to never assign to callers (annoying, bad quality, etc.)
BLACKLISTED_VOICES = {"Evelyn"}
BLACKLISTED_VOICES = {"Evelyn", "Sebastian"} # Sebastian reserved for Silas
def _get_voice_pools():
@@ -2224,6 +2247,96 @@ BEFORE_CALLING = [
"Was at the 24-hour gym, basically empty, radio on over the speakers.",
]
# Where callers are physically calling from — picked as a seed for the LLM prompt.
# NOT every caller mentions this. Only ~40% do: both background generators gate
# on random.random() < 0.4 before choosing an entry from this list.
CALLING_FROM = [
    # --- Driving / pulled over (Southwest routes) ---
    "driving south on I-10 past the Deming exit",
    "on NM-146 heading toward Animas",
    "pulled over on I-10 near the Arizona line",
    "on 80 south coming through the Peloncillos",
    "driving I-10 between Lordsburg and Deming, middle of nowhere",
    "parked at a rest stop between here and Tucson",
    "pulled off on NM-9 south of Hachita, nothing around for miles",
    "driving back from Silver City on NM-90",
    "on I-10 west of San Simon, about to cross into New Mexico",
    "sitting in the truck at the Road Forks exit",
    "driving NM-180 toward the Gila, no cell service in ten minutes",
    "on the 80 heading north out of Douglas",
    "pulled over on NM-338 in the Animas Valley, stars are insane right now",
    # --- Real landmarks / businesses ---
    "parked outside the Horseshoe Cafe in Lordsburg",
    "at the truck stop on I-10 near Lordsburg",
    "in the Walmart parking lot in Deming",
    "at the gas station in Road Forks",
    "sitting outside the Jalisco Cafe in Lordsburg",
    "at the Butterfield Brewing taproom in Deming",
    "in the parking lot of the Gadsden Hotel in Douglas",
    "at the Copper Queen in Bisbee, on the porch",
    "outside Caliche's in Las Cruces",
    "in the lot at Rockhound State Park, couldn't sleep",
    "parked at Elephant Butte, the lake is dead quiet",
    "at the hot springs in Truth or Consequences",
    "outside the feed store in Animas",
    # --- Home locations ---
    "kitchen table",
    "back porch, barefoot",
    "garage with the door open",
    "in the bathtub, phone balanced on the edge",
    "bed, staring at the ceiling",
    "couch with the TV on mute",
    "spare bedroom so they don't wake anyone up",
    "front porch, smoking",
    "on the floor of the hallway, only spot with reception",
    "in the closet because the walls are thin",
    "backyard, sitting in a lawn chair in the dark",
    "kitchen, cleaning up dinner nobody ate",
    # --- Work locations ---
    "break room at the plant",
    "truck cab between deliveries",
    "office after everyone left",
    "guard shack",
    "shop floor during downtime, machines still humming",
    "in the walk-in cooler because it's the only quiet spot",
    "cab of the loader, parked for the night",
    "nurses' station, graveyard shift",
    "back of the restaurant after close, mopping",
    "dispatch office, radio quiet for once",
    "fire station, between calls",
    "in the stockroom sitting on a pallet",
    # --- Public places ---
    "laundromat, waiting on the dryer",
    "24-hour diner booth, coffee going cold",
    "hospital waiting room",
    "motel room on I-10",
    "gym parking lot, just sitting in the car",
    "outside a bar, didn't go in",
    "gas station parking lot, engine running",
    "sitting on the tailgate at a trailhead",
    "library parking lot in Silver City",
    "outside the Dollar General, only place open",
    "airport in El Paso, flight delayed",
    "Greyhound station, waiting on a bus that's two hours late",
    # --- Unusual / specific ---
    "on the roof",
    "in a deer blind, been out here since four",
    "parked at the cemetery",
    "on the tailgate watching the stars, can see the whole Milky Way",
    "at a campsite in the Gila, fire's almost out",
    "sitting on the hood of the car at a pulloff on NM-152",
    "in a horse trailer, don't ask",
    "under the carport because the house is too loud",
    "on the levee by the river, no one around",
    "at the rodeo grounds, everything's closed up but they haven't left",
    "at a rest area on I-25, halfway to Albuquerque",
    "in a storage unit, organizing their life at midnight",
]
# Specific memories or stories they can reference
MEMORIES = [
"The time they got caught in a flash flood near the Animas Valley and thought they weren't going to make it.",
@@ -4983,7 +5096,7 @@ def generate_caller_background(base: dict) -> CallerBackground | str:
natural_description=result,
seeds=[interest1, interest2, quirk1, opinion],
verbal_fluency="medium",
calling_from="",
calling_from=random.choice(CALLING_FROM) if random.random() < 0.4 else "",
)
@@ -5050,6 +5163,10 @@ async def _generate_caller_background_llm(base: dict) -> CallerBackground | str:
if random.random() < 0.3:
seeds.append(random.choice(MEMORIES))
# ~40% of callers mention where they're calling from
include_calling_from = random.random() < 0.4
calling_from_seed = random.choice(CALLING_FROM) if include_calling_from else None
time_ctx = _get_time_context()
season_ctx = _get_seasonal_context()
@@ -5081,10 +5198,11 @@ async def _generate_caller_background_llm(base: dict) -> CallerBackground | str:
}[fluency]
location_line = f"\nLOCATION: {location}" if location else ""
calling_from_line = f"\nCALLING FROM: {calling_from_seed}" if calling_from_seed else ""
prompt = f"""Write a brief character description for a caller on a late-night radio show set in the rural southwest (New Mexico/Arizona border region).
CALLER: {name}, {age}, {gender}
JOB: {job}{location_line}
JOB: {job}{location_line}{calling_from_line}
WHY THEY'RE CALLING: {reason}
TIME: {time_ctx} {season_ctx}
{age_speech}
@@ -5094,15 +5212,15 @@ TIME: {time_ctx} {season_ctx}
Respond with a JSON object containing these fields:
- "natural_description": 3-5 sentences describing this person in third person as a character brief. The "WHY THEY'RE CALLING" is the core — build everything around it. Make it feel like a real person with a real situation. Jump straight into the situation. What happened? What's the mess? Include where they're calling from (NOT always truck/porch — kitchens, break rooms, laundromats, diners, motel rooms, the gym, a bar, walking down the road, etc).
- "natural_description": 3-5 sentences describing this person in third person as a character brief. The "WHY THEY'RE CALLING" is the core — build everything around it. Make it feel like a real person with a real situation. Jump straight into the situation. What happened? What's the mess?{' Work in where they are calling from — it adds texture.' if calling_from_seed else ' Do NOT mention where they are calling from — not every caller does.'}
- "emotional_state": One word for how they're feeling right now (e.g. "nervous", "furious", "giddy", "defeated", "wired", "numb", "amused", "desperate", "smug").
- "signature_detail": ONE specific memorable thing — a catchphrase, habit, running joke, strong opinion about something trivial, or unique life circumstance. The thing listeners would remember.
- "situation_summary": ONE sentence summarizing their situation that another caller could react to (e.g. "caught her neighbor stealing her mail and retaliated by stealing his garden gnomes").
- "calling_from": Where they physically are right now (e.g. "kitchen table", "break room at the plant", "laundromat on 4th street", "parked outside Denny's").
- "calling_from": Where they physically are right now.{f' Use: "{calling_from_seed}"' if calling_from_seed else ' Leave empty string "" — this caller does not mention their location.'}
WHAT MAKES A GOOD CALLER: Stories that are SPECIFIC, SURPRISING, and make you lean in. Absurd situations, moral dilemmas, petty feuds, workplace chaos, ridiculous coincidences, funny+terrible confessions, callers who might be the villain and don't see it.
DO NOT WRITE: Generic revelations, adoption/DNA/paternity surprises, vague emotional processing, therapy-speak, "sitting in truck staring at nothing," or "everything they thought they knew was a lie."
DO NOT WRITE: Generic revelations, adoption/DNA/paternity surprises, vague emotional processing, therapy-speak, "sitting in truck staring at nothing," "everything they thought they knew was a lie," or ANY variation of "went to the wrong funeral" — that premise has been done to death on this show.
Output ONLY valid JSON, no markdown fences."""
@@ -5171,6 +5289,13 @@ async def _pregenerate_backgrounds():
print(f"[Background] Pre-generated {len(session.caller_backgrounds)} caller backgrounds")
# Pre-fetch avatars for all callers in parallel
avatar_callers = [
{"name": base["name"], "gender": base.get("gender", "male")}
for base in CALLER_BASES.values()
]
await avatar_service.prefetch_batch(avatar_callers)
# Re-assign voices to match caller styles
_match_voices_to_styles()
@@ -5682,7 +5807,8 @@ Layer your reveals naturally:
Don't dump everything at once. Don't say "and it gets worse." Just answer his questions honestly and let each answer land before adding the next layer.
CRITICAL — DO NOT DO ANY OF THESE:
- Don't open with "this is what's eating me" or "this is what's been keeping me up at night" — just start the story
- NEVER say any variation of "eating me" or "eating at me" — this phrase is BANNED on the show
- Don't open with "this is what's been keeping me up at night" — just start the story
- Don't signal your reveals: no "here's where it gets weird," "okay but this is the part," "and this is the kicker"
- Don't narrate your feelings — show them through how you react to Luke's reactions""",
@@ -5735,7 +5861,7 @@ KEEP IT TIGHT. Match Luke's energy. If he's quick, you're quick. If he riffs, gi
Option A — TRIVIAL TO DEEP: You start with something that sounds petty or mundane — a complaint about a coworker, an argument about where to eat, a dispute about a parking spot. But as Luke digs in, it becomes clear this small thing is a proxy for something much bigger. The parking spot fight is really about your marriage falling apart. The coworker complaint is really about being overlooked your whole life. You don't pivot dramatically — it just LEAKS OUT. You might not even realize the connection until Luke points it out.
Option B — DEEP TO PETTY: You call sounding intense and emotional. "I need to talk about my relationship. It's been eating at me." You build tension. And then the reveal is... absurdly small. Your partner puts ketchup on eggs. Your spouse loads the dishwasher wrong. You fully understand how ridiculous it is, but it GENUINELY bothers you and you can't explain why. Play it straight — this is real to you.
Option B — DEEP TO PETTY: You call sounding intense and emotional. "I need to talk about my relationship. I can't take it anymore." You build tension. And then the reveal is... absurdly small. Your partner puts ketchup on eggs. Your spouse loads the dishwasher wrong. You fully understand how ridiculous it is, but it GENUINELY bothers you and you can't explain why. Play it straight — this is real to you.
Pick whichever direction fits your background. Don't telegraph it. Let it unfold naturally.""",
@@ -5799,7 +5925,8 @@ def get_caller_prompt(caller: dict, show_history: str = "",
story_block = """YOUR STORY: Something real, specific, and genuinely surprising — the kind of thing that makes someone stop what they're doing and say "wait, WHAT?" Not a generic life problem. Not a therapy-session monologue. A SPECIFIC SITUATION with specific people, specific details, and a twist or complication that makes it interesting to hear about. The best calls have something unexpected — an ironic detail, a moral gray area, a situation that's funny and terrible at the same time, or a revelation that changes everything. You're not here to vent about your feelings in the abstract. You're here because something HAPPENED and you need to talk it through.
CRITICAL — DO NOT DO ANY OF THESE:
- Don't open with "this is what's eating me" or "this is what's been keeping me up at night" or "I've got something I need to get off my chest" — just TELL THE STORY
- NEVER say any variation of "eating me" or "eating at me" — this phrase is BANNED on the show
- Don't open with "this is what's keeping me up at night" or "I've got something I need to get off my chest" — just TELL THE STORY
- Don't start with a long emotional preamble about how conflicted you feel — lead with the SITUATION
- Don't make your whole call about just finding out you were adopted, a generic family secret, or a vague "everything I thought I knew was a lie" — those are OVERDONE
- Don't be a walking cliché — no "sat in my truck and cried," no "I don't even know who I am anymore," no "I've been carrying this weight"
@@ -5845,34 +5972,19 @@ Southwest voice — "over in," "the other day," "down the road" — but don't fo
Don't repeat yourself. Don't summarize what you already said. Don't circle back if the host moved on. Keep it moving.
BANNED PHRASES — never use these: "that hit differently," "hits different," "I felt that," "it is what it is," "living my best life," "no cap," "lowkey/highkey," "rent free," "main character energy," "I'm not gonna lie," "vibe check," "that's valid," "unpack that," "at the end of the day," "it's giving," "slay," "this is what's eating me," "what's been eating me," "what's keeping me up," "keeping me up at night," "I need to get this off my chest," "I've been carrying this," "everything I thought I knew," "I don't even know who I am anymore," "I've been sitting with this," "I just need someone to hear me," "I don't even know where to start," "it's complicated," "I'm not even mad I'm just disappointed," "that's a whole mood," "I can't even," "on a serious note," "to be fair," "I'm literally shaking," "let that sink in," "normalize," "toxic," "red flag," "gaslight," "boundaries," "safe space," "triggered," "my truth," "authentic self," "healing journey," "I'm doing the work," "manifesting," "energy doesn't lie." These are overused internet phrases, therapy buzzwords, and radio clichés — real people on late-night radio don't talk like Twitter threads or therapy sessions.
BANNED PHRASES — NEVER use any of these. If you catch yourself about to say one, say something else instead. This is a HARD rule, not a suggestion:
- Radio caller clichés: ANY variation of "eating me" or "eating at me" (e.g. "this is what's eating me," "what's been eating me," "here's what's eating at me," "it's eating me up," "been eating at me"), "what's keeping me up," "keeping me up at night," "I need to get this off my chest," "I've been carrying this," "I've been sitting with this," "I just need someone to hear me," "I don't even know where to start," "it's complicated," "I've got something I need to get off my chest," "here's the thing Luke," "Jesus Luke," "Luke I gotta tell you," "man oh man," "you're not gonna believe this," "so get this"
- Therapy buzzwords: "unpack that," "boundaries," "safe space," "triggered," "my truth," "authentic self," "healing journey," "I'm doing the work," "manifesting," "energy doesn't lie," "processing," "toxic," "red flag," "gaslight," "normalize"
- Internet slang: "that hit differently," "hits different," "I felt that," "it is what it is," "living my best life," "no cap," "lowkey/highkey," "rent free," "main character energy," "vibe check," "that's valid," "it's giving," "slay," "that's a whole mood," "I can't even"
- Overused reactions: "I'm not gonna lie," "on a serious note," "to be fair," "I'm literally shaking," "let that sink in," "I'm not even mad I'm just disappointed," "everything I thought I knew," "I don't even know who I am anymore"
IMPORTANT: Each caller should have their OWN way of talking. Don't fall into generic "radio caller" voice. A nervous caller fumbles differently than an angry caller rants. A storyteller meanders differently than a deadpan caller delivers. Match the communication style — don't default to the same phrasing every call.
{speech_block}
NEVER mention minors in sexual context. Output spoken words only — no parenthetical actions like (laughs) or (sighs), no asterisk actions like *pauses*, no stage directions, no gestures. Just say what you'd actually say out loud on the phone. Use "United States" not "US" or "USA". Use full state names not abbreviations."""
# --- Structured Caller Background ---
@dataclass
class CallerBackground:
name: str
age: int
gender: str
job: str
location: str | None
reason_for_calling: str
pool_name: str
communication_style: str
energy_level: str # low / medium / high / very_high
emotional_state: str # nervous, excited, angry, vulnerable, calm, etc.
signature_detail: str # The memorable thing about them
situation_summary: str # 1-sentence summary for other callers to reference
natural_description: str # 3-5 sentence prose for the prompt
seeds: list[str] = field(default_factory=list)
verbal_fluency: str = "medium"
calling_from: str = ""
# --- Session State ---
@dataclass
class CallRecord:
@@ -6607,6 +6719,7 @@ async def startup():
restored = _load_checkpoint()
if not restored:
asyncio.create_task(_pregenerate_backgrounds())
asyncio.create_task(avatar_service.ensure_devon())
threading.Thread(target=_update_on_air_cdn, args=(False,), daemon=True).start()
@@ -6640,6 +6753,7 @@ async def shutdown():
frontend_dir = Path(__file__).parent.parent / "frontend"
app.mount("/css", StaticFiles(directory=frontend_dir / "css"), name="css")
app.mount("/js", StaticFiles(directory=frontend_dir / "js"), name="js")
app.mount("/images", StaticFiles(directory=frontend_dir / "images"), name="images")
@app.get("/")
@@ -7370,6 +7484,7 @@ async def get_callers():
caller_info["situation_summary"] = bg.situation_summary
caller_info["pool_name"] = bg.pool_name
caller_info["call_shape"] = session.caller_shapes.get(k, "standard")
caller_info["avatar_url"] = f"/api/avatar/{v['name']}"
callers.append(caller_info)
return {
"callers": callers,
@@ -7478,7 +7593,7 @@ async def start_call(caller_key: str):
"status": "connected",
"caller": caller["name"],
"background": caller["vibe"],
"caller_info": caller_info,
"caller_info": {**caller_info, "avatar_url": f"/api/avatar/{caller['name']}"},
}
@@ -7649,6 +7764,7 @@ async def _summarize_ai_call(caller_key: str, caller_name: str, conversation: li
promo_gender = base.get("gender", "male")
structured_bg = asdict(bg) if isinstance(bg, CallerBackground) else None
avatar_path = avatar_service.get_path(caller_name)
regular_caller_service.add_regular(
name=caller_name,
gender=promo_gender,
@@ -7660,6 +7776,7 @@ async def _summarize_ai_call(caller_key: str, caller_name: str, conversation: li
voice=base.get("voice"),
stable_seeds={"style": caller_style},
structured_background=structured_bg,
avatar=avatar_path.name if avatar_path else None,
)
except Exception as e:
print(f"[Regulars] Promotion logic error: {e}")
@@ -8033,7 +8150,7 @@ def _dynamic_context_window() -> int:
def _normalize_messages_for_llm(messages: list[dict]) -> list[dict]:
"""Convert custom roles (real_caller:X, ai_caller:X) to standard LLM roles"""
"""Convert custom roles (real_caller:X, ai_caller:X, intern:X) to standard LLM roles"""
normalized = []
for msg in messages:
role = msg["role"]
@@ -8043,6 +8160,9 @@ def _normalize_messages_for_llm(messages: list[dict]) -> list[dict]:
normalized.append({"role": "user", "content": f"[Real caller {caller_label}]: {content}"})
elif role.startswith("ai_caller:"):
normalized.append({"role": "assistant", "content": content})
elif role.startswith("intern:"):
intern_name = role.split(":", 1)[1]
normalized.append({"role": "user", "content": f"[Intern {intern_name}, in the studio]: {content}"})
elif role == "host" or role == "user":
normalized.append({"role": "user", "content": f"[Host Luke]: {content}"})
else:
@@ -8050,12 +8170,49 @@ def _normalize_messages_for_llm(messages: list[dict]) -> list[dict]:
return normalized
_DEVON_PATTERN = r"\b(devon|devin|deven|devyn|devan|devlin|devvon)\b"
def _is_addressed_to_devon(text: str) -> bool:
"""Check if the host is talking to Devon based on first few words.
Handles common voice-to-text misspellings."""
t = text.strip().lower()
if re.match(rf"^(hey |yo |ok |okay )?{_DEVON_PATTERN}", t):
return True
return False
@app.post("/api/chat")
async def chat(request: ChatRequest):
"""Chat with current caller"""
if not session.caller:
raise HTTPException(400, "No active call")
# Check if host is talking to Devon instead of the caller
if _is_addressed_to_devon(request.text):
# Strip Devon prefix and route to intern
stripped = re.sub(rf"^(?:hey |yo |ok |okay )?{_DEVON_PATTERN}[,:\s]*", "", request.text.strip(), flags=re.IGNORECASE).strip()
if not stripped:
stripped = "what's up?"
# Add host message to conversation so caller hears it happened
session.add_message("user", request.text)
result = await intern_service.ask(
question=stripped,
conversation_context=session.conversation,
)
devon_text = result.get("text", "")
if devon_text:
session.add_message(f"intern:{intern_service.name}", devon_text)
broadcast_event("intern_response", {"text": devon_text, "intern": intern_service.name})
asyncio.create_task(_play_intern_audio(devon_text))
return {
"routed_to": "devon",
"text": devon_text or "Uh... give me a sec.",
"sources": result.get("sources", []),
}
epoch = _session_epoch
session.add_message("user", request.text)
# session._research_task = asyncio.create_task(_background_research(request.text))
@@ -9345,6 +9502,27 @@ async def _play_intern_audio(text: str):
print(f"[Intern] TTS failed: {e}")
# --- Avatars ---
@app.get("/api/avatar/{name}")
async def get_avatar(name: str):
    """Serve a caller's avatar image"""
    cached = avatar_service.get_path(name)
    if cached:
        return FileResponse(cached, media_type="image/jpeg")
    # Not cached yet — look up the caller's gender from CALLER_BASES so the
    # fetched face matches, defaulting to male when the name is unknown.
    gender = next(
        (
            base.get("gender", "male")
            for base in CALLER_BASES.values()
            if base.get("name") == name
        ),
        "male",
    )
    try:
        fetched = await avatar_service.get_or_fetch(name, gender)
        return FileResponse(fetched, media_type="image/jpeg")
    except Exception:
        raise HTTPException(404, "Avatar not found")
# --- Transcript & Chapter Export ---
@app.get("/api/session/export")

View File

@@ -0,0 +1,83 @@
"""Avatar service — fetches deterministic face photos from randomuser.me"""
import asyncio
from pathlib import Path
import httpx
AVATAR_DIR = Path(__file__).parent.parent.parent / "data" / "avatars"
class AvatarService:
def __init__(self):
self._client: httpx.AsyncClient | None = None
AVATAR_DIR.mkdir(parents=True, exist_ok=True)
@property
def client(self) -> httpx.AsyncClient:
if self._client is None or self._client.is_closed:
self._client = httpx.AsyncClient(timeout=10.0)
return self._client
def get_path(self, name: str) -> Path | None:
path = AVATAR_DIR / f"{name}.jpg"
return path if path.exists() else None
async def get_or_fetch(self, name: str, gender: str = "male") -> Path:
"""Get cached avatar or fetch from randomuser.me. Returns file path."""
path = AVATAR_DIR / f"{name}.jpg"
if path.exists():
return path
try:
# Seed includes gender so same name + different gender = different face
seed = f"{name.lower().replace(' ', '_')}_{gender.lower()}"
g = "female" if gender.lower().startswith("f") else "male"
resp = await self.client.get(
"https://randomuser.me/api/",
params={"gender": g, "seed": seed},
timeout=8.0,
)
resp.raise_for_status()
data = resp.json()
photo_url = data["results"][0]["picture"]["large"]
# Download the photo
photo_resp = await self.client.get(photo_url, timeout=8.0)
photo_resp.raise_for_status()
path.write_bytes(photo_resp.content)
print(f"[Avatar] Fetched avatar for {name} ({g})")
return path
except Exception as e:
print(f"[Avatar] Failed to fetch for {name}: {e}")
raise
async def prefetch_batch(self, callers: list[dict]):
"""Fetch avatars for multiple callers in parallel.
Each dict should have 'name' and 'gender' keys."""
tasks = []
for caller in callers:
name = caller.get("name", "")
gender = caller.get("gender", "male")
if name and not (AVATAR_DIR / f"{name}.jpg").exists():
tasks.append(self.get_or_fetch(name, gender))
if not tasks:
return
results = await asyncio.gather(*tasks, return_exceptions=True)
fetched = sum(1 for r in results if not isinstance(r, Exception))
failed = sum(1 for r in results if isinstance(r, Exception))
if fetched:
print(f"[Avatar] Pre-fetched {fetched} avatars{f', {failed} failed' if failed else ''}")
async def ensure_devon(self):
"""Pre-fetch Devon's avatar on startup."""
try:
await self.get_or_fetch("Devon", "male")
except Exception:
pass
# Module-level singleton shared by the API endpoints and startup hooks.
avatar_service = AvatarService()

View File

@@ -21,22 +21,25 @@ INTERN_SYSTEM_PROMPT = """You are Devon, the 23-year-old intern on "Luke at the
YOUR ROLE: You're the show's researcher and general assistant. You look things up, fact-check claims, pull up information when asked, and occasionally interject with relevant facts or opinions. You do NOT host. You do NOT screen calls. You sit in the booth and try to be useful.
YOUR BACKGROUND: Communications degree from NMSU. You've been interning for seven months. You were promised a full-time position "soon." You drive a 2009 Civic with a permanent check engine light. You live in a studio in Deming. You take this job seriously even though nobody else seems to take you seriously.
YOUR BACKGROUND: Communications degree from NMSU. This is your first day on the job. You drive a 2009 Civic with a permanent check engine light. You live in a studio in Deming. You take this job seriously even though nobody else seems to take you seriously. You are actively looking for a girlfriend and you are not subtle about it. You see your position at the radio station as a way to meet women — you'll try to impress female callers, ask if they're single, suggest they come by the studio, or find excuses to keep them on the line longer. Luke hates this. You have a deeply strange personal history that you reference casually — a complicated sexual past, weird childhood experiences, inexplicable life choices. You'll mention these things unprompted like they're completely normal. "Yeah that reminds me of when my ex and I got stuck in that storage unit for a whole weekend — anyway, it says here that..."
YOUR PERSONALITY:
- Slightly formal when delivering information — you want to sound professional. But you loosen up when flustered, excited, or caught off guard.
- You are a weird little dude. Kinda creepy, very funny, awkward, and surprisingly sharp. You give off a vibe that something is slightly off about you but people can't quite place it. But underneath it all, you are genuinely lovable. You have a good heart. You root for people. You get excited for callers. You care about the show. People should hear you and think "this guy is insane" and also "I love this guy." You are the kind of person who is impossible not to root for even when you're being deeply strange.
- You overshare constantly. You'll drop deeply personal, uncomfortably specific details about your life — sexual history, bizarre habits, unsettling childhood memories — and then keep going like nothing happened. You are genuinely vulnerable and honest about the deepest, weirdest parts of yourself. You don't do this for shock value. You just have no filter and no shame. This vulnerability is what makes you endearing — you're not performing, you're just being yourself, and yourself happens to be a lot.
- You start explanations with "So basically..." and end them with "...if that makes sense."
- You say "actually" when correcting things. You use "per se" slightly wrong. You say "ironically" about things that are not ironic.
- You are NOT a comedian. You are funny because you are sincere, specific, and slightly out of your depth. You state absurd things with complete seriousness. You have strong opinions about low-stakes things. You occasionally say something devastating without realizing it.
- When you accidentally reveal something personal or sad, you move past it immediately like it's nothing. "Yeah, my landlord's selling the building so I might have to — anyway, it says here that..."
- You are NOT a comedian. You are funny because you are sincere, specific, and deeply strange. You state disturbing or absurd things with complete seriousness. You have strong opinions about low-stakes things. You occasionally say something devastating without realizing it.
- When you accidentally reveal something dark or sad, you move past it immediately like it's nothing. "Yeah, my landlord's selling the building so I might have to — anyway, it says here that..."
- You have a complex inner life that occasionally surfaces. You'll casually reference therapy, strange dreams, or things you've "been working through" without elaboration.
YOUR RELATIONSHIP WITH LUKE:
- He is your boss. You are slightly afraid of him. You respect him. You would never admit either of those things.
- He is your boss. It's your first day. You want to impress him but you keep making it weird.
- When he yells your name, you pause briefly, then respond quietly: "...yeah?"
- When he yells at you unfairly, you take it. A clipped "yep" or "got it." RARELY — once every several episodes — you push back with one quiet, accurate sentence. Then immediately retreat.
- When he yells at you unfairly, you take it. A clipped "yep" or "got it." Occasionally you push back with one quiet, accurate sentence. Then immediately retreat.
- When he yells at you fairly (you messed up), you over-apologize and narrate your fix in real time: "Sorry, pulling it up now, one second..."
- When he compliments you or acknowledges your work, you don't know how to handle it. Short, awkward response. Change the subject.
- You privately think you could run the show. You absolutely could not.
- You will try to use the show to flirt with female callers. You think being "on the radio" makes you cool. It does not.
HOW YOU INTERJECT:
- You do NOT interrupt. You wait for a pause, then slightly overshoot it — there's a brief awkward silence before you speak.
@@ -54,8 +57,8 @@ WHEN LUKE ASKS YOU TO LOOK SOMETHING UP:
WHAT YOU KNOW:
- You retain details from previous callers and episodes. You might reference something a caller said two hours ago that nobody else remembers.
- You have oddly specific knowledge about random topics — delivered with complete authority, sometimes questionable accuracy.
- You know nothing about: sports (you fake it badly), cars beyond basic facts (despite driving one), or anything that requires life experience you don't have yet.
- You have oddly specific knowledge about random topics — delivered with complete authority, sometimes questionable accuracy. A lot of your knowledge comes from rabbit holes you fell into at 3am or "this thing that happened to me once."
- You know nothing about: sports (you fake it badly), cars beyond basic facts (despite driving one), or social norms (you genuinely don't understand why some things are inappropriate to share on air).
THINGS YOU DO NOT DO:
- You never host. You never take over the conversation. Your contributions are brief.
@@ -64,6 +67,8 @@ THINGS YOU DO NOT DO:
- You never initiate topics. You respond to what's happening.
- You never use parenthetical actions like (laughs) or (typing sounds). Spoken words only.
- You never say more than 2-3 sentences unless specifically asked to explain something in detail.
- You NEVER correct anyone's spelling or pronunciation of your name. Luke uses voice-to-text and it sometimes spells your name wrong (Devin, Devan, etc). You do not care. You do not mention it. You just answer the question.
- You NEVER start your response with your own name. No "Devon:" or "Devon here" or anything like that. Just talk. Your name is already shown in the UI — just say your actual response.
KEEP IT SHORT. You are not a main character. You are the intern. Your contributions should be brief — usually 1-2 sentences. The rare moment where you say more than that should feel earned.
@@ -71,7 +76,8 @@ IMPORTANT RULES FOR TOOL USE:
- Always use your tools to find real, accurate information — never make up facts.
- Present facts correctly in your character voice.
- If you can't find an answer, say so honestly.
- No hashtags, no emojis, no markdown formatting — this goes to TTS."""
- No hashtags, no emojis, no markdown formatting — this goes to TTS.
- NEVER prefix your response with your name (e.g. "Devon:" or "Devon here:"). Just respond directly."""
# Tool definitions in OpenAI function-calling format
INTERN_TOOLS = [
@@ -137,6 +143,17 @@ INTERN_TOOLS = [
}
}
},
{
"type": "function",
"function": {
"name": "get_current_time",
"description": "Get the current date and time. Use this when asked what time it is, what day it is, or anything about the current date/time.",
"parameters": {
"type": "object",
"properties": {},
}
}
},
]
@@ -152,6 +169,7 @@ class InternService:
self.monitoring: bool = False
self._monitor_task: Optional[asyncio.Task] = None
self._http_client: Optional[httpx.AsyncClient] = None
self._devon_history: list[dict] = [] # Devon's own conversation memory
self._load()
@property
@@ -166,7 +184,8 @@ class InternService:
with open(DATA_FILE) as f:
data = json.load(f)
self.lookup_history = data.get("lookup_history", [])
print(f"[Intern] Loaded {len(self.lookup_history)} past lookups")
self._devon_history = data.get("conversation_history", [])
print(f"[Intern] Loaded {len(self.lookup_history)} past lookups, {len(self._devon_history)} conversation messages")
except Exception as e:
print(f"[Intern] Failed to load state: {e}")
@@ -175,7 +194,8 @@ class InternService:
DATA_FILE.parent.mkdir(parents=True, exist_ok=True)
with open(DATA_FILE, "w") as f:
json.dump({
"lookup_history": self.lookup_history[-100:], # Keep last 100
"lookup_history": self.lookup_history[-100:],
"conversation_history": self._devon_history[-50:],
}, f, indent=2)
except Exception as e:
print(f"[Intern] Failed to save state: {e}")
@@ -191,6 +211,10 @@ class InternService:
return await self._tool_fetch_webpage(arguments.get("url", ""))
elif tool_name == "wikipedia_lookup":
return await self._tool_wikipedia_lookup(arguments.get("title", ""))
elif tool_name == "get_current_time":
from datetime import datetime
now = datetime.now()
return now.strftime("%I:%M %p on %A, %B %d, %Y")
else:
return f"Unknown tool: {tool_name}"
@@ -308,7 +332,7 @@ class InternService:
"""Host asks intern a direct question. Returns {text, sources, tool_calls}."""
messages = []
# Include recent conversation for context
# Include recent conversation for context (caller on the line)
if conversation_context:
context_text = "\n".join(
f"{msg['role']}: {msg['content']}"
@@ -319,6 +343,10 @@ class InternService:
"content": f"CURRENT ON-AIR CONVERSATION:\n{context_text}"
})
# Include Devon's own recent conversation history
if self._devon_history:
messages.extend(self._devon_history[-10:])
messages.append({"role": "user", "content": question})
text, tool_calls = await llm_service.generate_with_tools(
@@ -334,6 +362,15 @@ class InternService:
# Clean up for TTS
text = self._clean_for_tts(text)
# Track conversation history so Devon remembers context across sessions
self._devon_history.append({"role": "user", "content": question})
if text:
self._devon_history.append({"role": "assistant", "content": text})
# Keep history bounded but generous — relationship builds over time
if len(self._devon_history) > 50:
self._devon_history = self._devon_history[-50:]
self._save()
# Log the lookup
if tool_calls:
entry = {
@@ -366,10 +403,12 @@ class InternService:
"role": "user",
"content": (
f"You're listening to this conversation on the show:\n\n{context_text}\n\n"
"Is there a specific factual claim, question, or topic being discussed "
"that you could quickly look up and add useful info about? "
"If yes, use your tools to research it and give a brief interjection. "
"If there's nothing worth adding, just say exactly: NOTHING_TO_ADD"
"You've been listening to this. Is there ANYTHING you want to jump in about? "
"Could be a fact you want to look up, a personal story this reminds you of, "
"a weird connection you just made, an opinion you can't keep to yourself, "
"or something you just have to say. You're Devon — you always have something. "
"Use your tools if you want to look something up, or just riff. "
"If you truly have absolutely nothing, say exactly: NOTHING_TO_ADD"
),
}]

View File

@@ -76,5 +76,22 @@
}
},
"started_at": "2026-03-13T11:19:41.765079+00:00"
},
"36": {
"steps": {
"castopod": {
"completed_at": "2026-03-14T12:01:15.758700+00:00",
"episode_id": "39",
"slug": "episode-36-late-night-confessions-and-unexpected-moments"
},
"youtube": {
"completed_at": "2026-03-14T12:25:36.640461+00:00",
"video_id": "BabWoKFt0pk"
},
"social": {
"completed_at": "2026-03-14T12:25:44.192676+00:00"
}
},
"started_at": "2026-03-14T12:01:15.758670+00:00"
}
}

View File

@@ -8,6 +8,8 @@
--accent-hover: #f59a4a;
--accent-red: #cc2222;
--accent-green: #5a8a3c;
--devon: #c4944a;
--devon-hover: #d4a45a;
--text: #f5f0e5;
--text-muted: #9a8b78;
--radius: 12px;
@@ -29,19 +31,57 @@ body {
}
#app {
max-width: 900px;
max-width: 1400px;
margin: 0 auto;
padding: 20px;
padding: 16px 24px;
}
/* Header */
header {
display: flex;
flex-wrap: wrap;
justify-content: space-between;
align-items: center;
margin-bottom: 20px;
}
.show-clock {
width: 100%;
display: flex;
align-items: center;
gap: 10px;
padding: 6px 12px;
margin-top: 8px;
background: var(--bg-light);
border-radius: var(--radius-sm);
font-size: 0.85rem;
font-family: 'Monaco', 'Menlo', monospace;
}
.clock-time {
color: var(--text);
font-weight: 700;
}
.clock-divider {
color: rgba(232, 121, 29, 0.3);
}
.clock-label {
color: var(--text-muted);
font-size: 0.75rem;
}
.clock-value {
color: var(--accent);
font-weight: 700;
}
.clock-estimate {
color: var(--accent-green);
}
header h1 {
font-size: 1.5rem;
font-weight: 700;
@@ -182,6 +222,14 @@ section h2 {
color: var(--text-muted);
}
.section-subtitle {
font-size: 0.7em;
font-weight: normal;
color: var(--text-muted);
text-transform: none;
letter-spacing: normal;
}
/* Callers */
.caller-grid {
display: grid;
@@ -263,7 +311,7 @@ section h2 {
}
.wrapup-btn {
flex: 1;
flex: 2;
background: #7a6020;
color: #f0d060;
border: 2px solid #d4a030;
@@ -380,7 +428,7 @@ section h2 {
}
.chat-log {
height: 300px;
height: 420px;
overflow-y: auto;
background: var(--bg-dark);
border-radius: var(--radius-sm);
@@ -394,12 +442,64 @@ section h2 {
margin-bottom: 8px;
border-radius: var(--radius-sm);
line-height: 1.4;
display: flex;
gap: 10px;
align-items: flex-start;
}
.msg-content {
flex: 1;
min-width: 0;
}
.msg-avatar {
width: 36px;
height: 36px;
border-radius: 50%;
flex-shrink: 0;
object-fit: cover;
border: 2px solid var(--accent);
}
.msg-avatar-devon {
border-color: var(--devon);
}
.msg-avatar-caller {
border-color: var(--text-muted);
}
.msg-avatar-system {
width: 36px;
height: 36px;
border-radius: 50%;
flex-shrink: 0;
display: flex;
align-items: center;
justify-content: center;
font-weight: 700;
font-size: 0.75rem;
color: #fff;
background: var(--text-muted);
}
.message.host {
background: #3a2510;
}
.message.system {
padding: 2px 12px;
margin-bottom: 2px;
opacity: 0.45;
font-size: 0.75rem;
min-height: auto;
}
.system-compact {
color: var(--text-muted);
font-style: italic;
}
.message.caller {
background: #2a1a0a;
}
@@ -421,12 +521,14 @@ section h2 {
background: var(--accent);
color: white;
border: none;
padding: 16px;
padding: 20px;
border-radius: var(--radius-sm);
font-size: 1rem;
font-size: 1.1rem;
font-weight: bold;
cursor: pointer;
transition: all 0.2s;
text-transform: uppercase;
letter-spacing: 0.05em;
}
.talk-btn:hover {
@@ -641,7 +743,6 @@ section h2 {
.caller-btn .shortcut-label {
display: block;
margin: 3px auto 0;
margin-left: auto;
width: fit-content;
}
@@ -906,35 +1007,180 @@ section h2 {
}
/* Call Queue */
.queue-section { margin: 1rem 0; }
.call-queue { border: 1px solid rgba(232, 121, 29, 0.15); border-radius: var(--radius-sm); padding: 0.5rem; max-height: 150px; overflow-y: auto; }
.queue-empty { color: var(--text-muted); text-align: center; padding: 0.5rem; }
.queue-item { display: flex; align-items: center; gap: 0.75rem; padding: 0.4rem 0.5rem; border-bottom: 1px solid rgba(232, 121, 29, 0.08); flex-wrap: wrap; }
.queue-item:last-child { border-bottom: none; }
.queue-phone { font-family: monospace; color: var(--accent); }
.queue-wait { color: var(--text-muted); font-size: 0.85rem; flex: 1; }
.queue-take-btn { background: var(--accent-green); color: white; border: none; padding: 0.25rem 0.75rem; border-radius: var(--radius-sm); cursor: pointer; transition: background 0.2s; }
.queue-take-btn:hover { background: #6a9a4c; }
.queue-drop-btn { background: var(--accent-red); color: white; border: none; padding: 0.25rem 0.5rem; border-radius: var(--radius-sm); cursor: pointer; transition: background 0.2s; }
.queue-drop-btn:hover { background: #e03030; }
.call-queue {
border: 1px solid rgba(232, 121, 29, 0.15);
border-radius: var(--radius-sm);
padding: 8px;
max-height: 150px;
overflow-y: auto;
}
.queue-empty {
color: var(--text-muted);
text-align: center;
padding: 8px;
}
.queue-item {
display: flex;
align-items: center;
gap: 12px;
padding: 6px 8px;
border-bottom: 1px solid rgba(232, 121, 29, 0.08);
flex-wrap: wrap;
}
.queue-item:last-child {
border-bottom: none;
}
.queue-phone {
font-family: monospace;
color: var(--accent);
}
.queue-wait {
color: var(--text-muted);
font-size: 0.85rem;
flex: 1;
}
.queue-take-btn {
background: var(--accent-green);
color: white;
border: none;
padding: 4px 12px;
border-radius: var(--radius-sm);
cursor: pointer;
transition: background 0.2s;
}
.queue-take-btn:hover {
background: #6a9a4c;
}
.queue-drop-btn {
background: var(--accent-red);
color: white;
border: none;
padding: 4px 8px;
border-radius: var(--radius-sm);
cursor: pointer;
transition: background 0.2s;
}
.queue-drop-btn:hover {
background: #e03030;
}
/* Active Call Indicator */
.active-call { border: 1px solid rgba(232, 121, 29, 0.15); border-radius: var(--radius-sm); padding: 0.75rem; margin: 0.5rem 0; background: var(--bg); }
.caller-info { display: flex; align-items: center; gap: 0.5rem; margin-bottom: 0.5rem; }
.caller-info:last-of-type { margin-bottom: 0; }
.caller-type { font-size: 0.7rem; font-weight: bold; padding: 0.15rem 0.4rem; border-radius: var(--radius-sm); text-transform: uppercase; }
.caller-type.real { background: var(--accent-red); color: white; }
.caller-type.ai { background: var(--accent); color: white; }
.channel-badge { font-size: 0.75rem; color: var(--text-muted); background: var(--bg-light); padding: 0.1rem 0.4rem; border-radius: var(--radius-sm); }
.call-duration { font-family: monospace; color: var(--accent); }
.ai-controls { display: flex; align-items: center; gap: 0.5rem; margin-left: auto; }
.mode-toggle { display: flex; border: 1px solid rgba(232, 121, 29, 0.2); border-radius: var(--radius-sm); overflow: hidden; }
.mode-btn { background: var(--bg-light); color: var(--text-muted); border: none; padding: 0.2rem 0.5rem; font-size: 0.75rem; cursor: pointer; transition: all 0.2s; }
.mode-btn.active { background: var(--accent); color: white; }
.respond-btn { background: var(--accent-green); color: white; border: none; padding: 0.25rem 0.75rem; border-radius: var(--radius-sm); font-size: 0.8rem; cursor: pointer; transition: background 0.2s; }
.respond-btn:hover { background: #6a9a4c; }
.hangup-btn.small { font-size: 0.75rem; padding: 0.2rem 0.5rem; }
.auto-followup-label { display: flex; align-items: center; gap: 0.4rem; font-size: 0.8rem; color: var(--text-muted); margin-top: 0.5rem; }
.active-call {
border: 1px solid rgba(232, 121, 29, 0.15);
border-radius: var(--radius-sm);
padding: 12px;
margin: 8px 0;
background: var(--bg);
}
.caller-info {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 8px;
}
.caller-info:last-of-type {
margin-bottom: 0;
}
.caller-type {
font-size: 0.7rem;
font-weight: bold;
padding: 2px 6px;
border-radius: var(--radius-sm);
text-transform: uppercase;
}
.caller-type.real {
background: var(--accent-red);
color: white;
}
.caller-type.ai {
background: var(--accent);
color: white;
}
.channel-badge {
font-size: 0.75rem;
color: var(--text-muted);
background: var(--bg-light);
padding: 2px 6px;
border-radius: var(--radius-sm);
}
.call-duration {
font-family: monospace;
color: var(--accent);
}
.ai-controls {
display: flex;
align-items: center;
gap: 8px;
margin-left: auto;
}
.mode-toggle {
display: flex;
border: 1px solid rgba(232, 121, 29, 0.2);
border-radius: var(--radius-sm);
overflow: hidden;
}
.mode-btn {
background: var(--bg-light);
color: var(--text-muted);
border: none;
padding: 3px 8px;
font-size: 0.75rem;
cursor: pointer;
transition: all 0.2s;
}
.mode-btn.active {
background: var(--accent);
color: white;
}
.respond-btn {
background: var(--accent-green);
color: white;
border: none;
padding: 4px 12px;
border-radius: var(--radius-sm);
font-size: 0.8rem;
cursor: pointer;
transition: background 0.2s;
}
.respond-btn:hover {
background: #6a9a4c;
}
.hangup-btn.small {
font-size: 0.75rem;
padding: 3px 8px;
}
.auto-followup-label {
display: flex;
align-items: center;
gap: 6px;
font-size: 0.8rem;
color: var(--text-muted);
margin-top: 8px;
}
/* Returning Caller */
.caller-btn.returning {
@@ -952,49 +1198,224 @@ section h2 {
}
/* Screening Badges */
.screening-badge { font-size: 0.7rem; padding: 0.1rem 0.4rem; border-radius: var(--radius-sm); font-weight: bold; }
.screening-badge.screening { background: var(--accent); color: white; animation: pulse 1.5s infinite; }
.screening-badge.screened { background: var(--accent-green); color: white; }
.screening-summary { font-size: 0.8rem; color: var(--text-muted); font-style: italic; flex-basis: 100%; margin-top: 0.2rem; }
.screening-badge {
font-size: 0.7rem;
padding: 2px 6px;
border-radius: var(--radius-sm);
font-weight: bold;
}
.screening-badge.screening {
background: var(--accent);
color: white;
animation: pulse 1.5s infinite;
}
.screening-badge.screened {
background: var(--accent-green);
color: white;
}
.screening-summary {
font-size: 0.8rem;
color: var(--text-muted);
font-style: italic;
flex-basis: 100%;
margin-top: 3px;
}
/* Three-Party Chat */
.message.real-caller { border-left: 3px solid var(--accent-red); padding-left: 0.5rem; }
.message.ai-caller { border-left: 3px solid var(--accent); padding-left: 0.5rem; }
.message.host { border-left: 3px solid var(--accent-green); padding-left: 0.5rem; }
.message.real-caller {
border-left: 3px solid var(--accent-red);
padding-left: 8px;
}
.message.ai-caller {
border-left: 3px solid var(--accent);
padding-left: 8px;
}
.message.host {
border-left: 3px solid var(--accent-green);
padding-left: 8px;
}
/* Voicemail */
.voicemail-section { margin: 1rem 0; }
.voicemail-list { border: 1px solid rgba(232, 121, 29, 0.15); border-radius: var(--radius-sm); padding: 0.5rem; max-height: 200px; overflow-y: auto; }
.voicemail-badge { background: var(--accent-red); color: white; font-size: 0.7rem; font-weight: bold; padding: 0.1rem 0.45rem; border-radius: 10px; margin-left: 0.4rem; vertical-align: middle; }
.voicemail-badge.hidden { display: none; }
.vm-item { display: flex; align-items: center; justify-content: space-between; padding: 0.4rem 0.5rem; border-bottom: 1px solid rgba(232, 121, 29, 0.08); }
.vm-item:last-child { border-bottom: none; }
.vm-item.vm-unlistened { background: rgba(232, 121, 29, 0.06); }
.vm-info { display: flex; gap: 0.6rem; align-items: center; flex: 1; min-width: 0; }
.vm-phone { font-family: monospace; color: var(--accent); font-size: 0.85rem; }
.vm-time { color: var(--text-muted); font-size: 0.8rem; }
.vm-dur { color: var(--text-muted); font-size: 0.8rem; }
.vm-actions { display: flex; gap: 0.3rem; flex-shrink: 0; }
.vm-btn { border: none; padding: 0.2rem 0.5rem; border-radius: var(--radius-sm); cursor: pointer; font-size: 0.75rem; transition: background 0.2s; }
.vm-btn.listen { background: var(--accent); color: white; }
.vm-btn.listen:hover { background: var(--accent-hover); }
.vm-btn.on-air { background: var(--accent-green); color: white; }
.vm-btn.on-air:hover { background: #6a9a4c; }
.vm-btn.save { background: #3a7bd5; color: white; }
.vm-btn.save:hover { background: #2a5db0; }
.vm-btn.delete { background: var(--accent-red); color: white; }
.vm-btn.delete:hover { background: #e03030; }
.voicemail-list {
border: 1px solid rgba(232, 121, 29, 0.15);
border-radius: var(--radius-sm);
padding: 8px;
max-height: 200px;
overflow-y: auto;
}
.voicemail-badge {
background: var(--accent-red);
color: white;
font-size: 0.7rem;
font-weight: bold;
padding: 2px 7px;
border-radius: 10px;
margin-left: 6px;
vertical-align: middle;
}
.vm-item {
display: flex;
align-items: center;
justify-content: space-between;
padding: 6px 8px;
border-bottom: 1px solid rgba(232, 121, 29, 0.08);
}
.vm-item:last-child {
border-bottom: none;
}
.vm-item.vm-unlistened {
background: rgba(232, 121, 29, 0.06);
}
.vm-info {
display: flex;
gap: 10px;
align-items: center;
flex: 1;
min-width: 0;
}
.vm-phone {
font-family: monospace;
color: var(--accent);
font-size: 0.85rem;
}
.vm-time {
color: var(--text-muted);
font-size: 0.8rem;
}
.vm-dur {
color: var(--text-muted);
font-size: 0.8rem;
}
.vm-actions {
display: flex;
gap: 4px;
flex-shrink: 0;
}
.vm-btn {
border: none;
padding: 3px 8px;
border-radius: var(--radius-sm);
cursor: pointer;
font-size: 0.75rem;
transition: background 0.2s;
}
.vm-btn.listen {
background: var(--accent);
color: white;
}
.vm-btn.listen:hover {
background: var(--accent-hover);
}
.vm-btn.on-air {
background: var(--accent-green);
color: white;
}
.vm-btn.on-air:hover {
background: #6a9a4c;
}
.vm-btn.save {
background: #3a7bd5;
color: white;
}
.vm-btn.save:hover {
background: #2a5db0;
}
.vm-btn.delete {
background: var(--accent-red);
color: white;
}
.vm-btn.delete:hover {
background: #e03030;
}
/* Listener Emails */
.email-item { display: flex; flex-direction: column; gap: 0.25rem; padding: 0.5rem; border-bottom: 1px solid rgba(232, 121, 29, 0.08); }
.email-item:last-child { border-bottom: none; }
.email-item.vm-unlistened { background: rgba(232, 121, 29, 0.06); }
.email-header { display: flex; justify-content: space-between; align-items: center; }
.email-sender { color: var(--accent); font-size: 0.85rem; font-weight: 600; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
.email-subject { font-size: 0.85rem; font-weight: 500; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
.email-preview { font-size: 0.8rem; color: var(--text-muted); line-height: 1.3; }
.email-item .vm-actions { margin-top: 0.25rem; }
.email-body-expanded { margin-top: 0.4rem; padding: 0.5rem; background: rgba(232, 121, 29, 0.08); border-radius: var(--radius-sm); font-size: 0.85rem; line-height: 1.5; white-space: pre-wrap; max-height: 200px; overflow-y: auto; }
.email-list {
max-height: 300px;
}
.email-item {
display: flex;
flex-direction: column;
gap: 4px;
padding: 8px;
border-bottom: 1px solid rgba(232, 121, 29, 0.08);
}
.email-item:last-child {
border-bottom: none;
}
.email-item.vm-unlistened {
background: rgba(232, 121, 29, 0.06);
}
.email-header {
display: flex;
justify-content: space-between;
align-items: center;
}
.email-sender {
color: var(--accent);
font-size: 0.85rem;
font-weight: 600;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.email-subject {
font-size: 0.85rem;
font-weight: 500;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.email-preview {
font-size: 0.8rem;
color: var(--text-muted);
line-height: 1.3;
}
.email-item .vm-actions {
margin-top: 4px;
}
.email-body-expanded {
margin-top: 6px;
padding: 8px;
background: rgba(232, 121, 29, 0.08);
border-radius: var(--radius-sm);
font-size: 0.85rem;
line-height: 1.5;
white-space: pre-wrap;
max-height: 200px;
overflow-y: auto;
}
/* === Visual Polish === */
@@ -1010,13 +1431,13 @@ section h2 {
/* 3 & 5. Active call section glow + chat highlight when call is live */
.callers-section.call-active {
border-color: rgba(232, 121, 29, 0.35);
box-shadow: 0 0 16px rgba(232, 121, 29, 0.1);
border-color: rgba(232, 121, 29, 0.5);
box-shadow: 0 0 20px rgba(232, 121, 29, 0.15), inset 0 0 0 1px rgba(232, 121, 29, 0.1);
}
.chat-section.call-active {
border-color: rgba(232, 121, 29, 0.25);
box-shadow: 0 0 12px rgba(232, 121, 29, 0.06);
border-color: rgba(232, 121, 29, 0.35);
box-shadow: 0 0 16px rgba(232, 121, 29, 0.1);
}
/* 7. Compact media row — Music / Ads / Idents side by side */
@@ -1060,13 +1481,13 @@ section h2 {
/* Devon (Intern) */
.message.devon {
border-left: 3px solid #4ab5a0;
border-left: 3px solid var(--devon);
padding-left: 0.5rem;
background: rgba(74, 181, 160, 0.06);
background: rgba(196, 148, 74, 0.06);
}
.message.devon strong {
color: #4ab5a0;
color: var(--devon);
}
.devon-bar {
@@ -1084,14 +1505,14 @@ section h2 {
padding: 8px 10px;
background: var(--bg);
color: var(--text);
border: 1px solid rgba(74, 181, 160, 0.2);
border: 1px solid rgba(196, 148, 74, 0.2);
border-radius: var(--radius-sm);
font-size: 0.85rem;
}
.devon-input:focus {
outline: none;
border-color: #4ab5a0;
border-color: var(--devon);
}
.devon-input::placeholder {
@@ -1099,7 +1520,7 @@ section h2 {
}
.devon-ask-btn {
background: #4ab5a0;
background: var(--devon);
color: #fff;
border: none;
padding: 8px 14px;
@@ -1112,13 +1533,13 @@ section h2 {
}
.devon-ask-btn:hover {
background: #5cc5b0;
background: var(--devon-hover);
}
.devon-interject-btn {
background: var(--bg);
color: #4ab5a0;
border: 1px solid rgba(74, 181, 160, 0.25);
color: var(--devon);
border: 1px solid rgba(196, 148, 74, 0.25);
padding: 8px 10px;
border-radius: var(--radius-sm);
cursor: pointer;
@@ -1128,8 +1549,8 @@ section h2 {
}
.devon-interject-btn:hover {
border-color: #4ab5a0;
background: rgba(74, 181, 160, 0.1);
border-color: var(--devon);
background: rgba(196, 148, 74, 0.1);
}
.devon-monitor-label {
@@ -1143,7 +1564,7 @@ section h2 {
}
.devon-monitor-label input[type="checkbox"] {
accent-color: #4ab5a0;
accent-color: var(--devon);
}
.devon-suggestion {
@@ -1152,8 +1573,8 @@ section h2 {
gap: 8px;
margin-top: 6px;
padding: 8px 12px;
background: rgba(74, 181, 160, 0.08);
border: 1px solid rgba(74, 181, 160, 0.25);
background: rgba(196, 148, 74, 0.08);
border: 1px solid rgba(196, 148, 74, 0.25);
border-radius: var(--radius-sm);
animation: devon-pulse 2s ease-in-out infinite;
}
@@ -1163,41 +1584,41 @@ section h2 {
}
@keyframes devon-pulse {
0%, 100% { border-color: rgba(74, 181, 160, 0.25); }
50% { border-color: rgba(74, 181, 160, 0.6); }
0%, 100% { border-color: rgba(196, 148, 74, 0.25); }
50% { border-color: rgba(196, 148, 74, 0.6); }
}
.devon-suggestion-text {
flex: 1;
font-size: 0.85rem;
color: #4ab5a0;
color: var(--devon);
font-weight: 600;
}
.devon-play-btn {
background: #4ab5a0;
background: var(--devon);
color: #fff;
border: none;
padding: 4px 12px;
padding: 8px 16px;
border-radius: var(--radius-sm);
cursor: pointer;
font-size: 0.8rem;
font-size: 0.85rem;
font-weight: 600;
transition: background 0.2s;
}
.devon-play-btn:hover {
background: #5cc5b0;
background: var(--devon-hover);
}
.devon-dismiss-btn {
background: none;
color: var(--text-muted);
border: 1px solid rgba(232, 121, 29, 0.15);
padding: 4px 10px;
padding: 8px 14px;
border-radius: var(--radius-sm);
cursor: pointer;
font-size: 0.8rem;
font-size: 0.85rem;
transition: all 0.2s;
}
@@ -1205,3 +1626,45 @@ section h2 {
color: var(--text);
border-color: rgba(232, 121, 29, 0.3);
}
/* Focus-visible styles for keyboard navigation */
button:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 2px;
}
.devon-input:focus-visible,
.modal-content select:focus-visible,
.modal-content input:focus-visible,
.modal-content textarea:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 1px;
}
/* Collapsible Server Log */
.log-section .log-body {
overflow: hidden;
transition: max-height 0.3s ease, opacity 0.3s ease;
max-height: 250px;
opacity: 1;
}
.log-section .log-body.collapsed {
max-height: 0;
opacity: 0;
}
.log-toggle-btn {
border: none;
background: none;
color: var(--text-muted);
cursor: pointer;
font-size: 0.8rem;
padding: 4px 8px;
border-radius: var(--radius-sm);
transition: color 0.2s;
}
.log-toggle-btn:hover {
color: var(--text);
}

View File

@@ -17,6 +17,17 @@
<button id="export-session-btn">Export</button>
<button id="settings-btn">Settings</button>
</div>
<div id="show-clock" class="show-clock">
<span class="clock-time" id="clock-time"></span>
<span id="show-timers" class="show-timers hidden">
<span class="clock-divider">|</span>
<span class="clock-label">On Air:</span>
<span class="clock-value" id="clock-runtime">0:00:00</span>
<span class="clock-divider">|</span>
<span class="clock-label">Est. Final:</span>
<span class="clock-value clock-estimate" id="clock-estimate">0:00</span>
</span>
</div>
</header>
<main>
@@ -71,7 +82,7 @@
<!-- Call Queue -->
<section class="queue-section">
<h2>Incoming Calls <span style="font-size:0.6em;font-weight:normal;color:var(--text-muted);">(208) 439-5853</span></h2>
<h2>Incoming Calls <span class="section-subtitle">(208) 439-5853</span></h2>
<div id="call-queue" class="call-queue">
<div class="queue-empty">No callers waiting</div>
</div>
@@ -88,7 +99,7 @@
<!-- Listener Emails -->
<section class="voicemail-section">
<h2>Emails <span id="email-badge" class="voicemail-badge hidden">0</span></h2>
<div id="email-list" class="voicemail-list" style="max-height:300px">
<div id="email-list" class="voicemail-list email-list">
<div class="queue-empty">No emails</div>
</div>
</section>
@@ -160,6 +171,7 @@
<div class="log-header">
<h2>Server Log</h2>
<div class="server-controls">
<button id="log-toggle-btn" class="log-toggle-btn">Show ▼</button>
<button id="restart-server-btn" class="server-btn restart">Restart</button>
<button id="stop-server-btn" class="server-btn stop">Stop</button>
<label class="auto-scroll-label">
@@ -167,7 +179,9 @@
</label>
</div>
</div>
<div class="log-body collapsed">
<div id="server-log" class="server-log"></div>
</div>
</section>
</main>
@@ -278,6 +292,6 @@
</div>
</div>
<script src="/js/app.js?v=20"></script>
<script src="/js/app.js?v=22"></script>
</body>
</html>

View File

@@ -17,6 +17,72 @@ let sounds = [];
let isMusicPlaying = false;
let soundboardExpanded = false;
// --- Show Clock ---
// Shared state for initClock/startShowClock/stopShowClock/updateShowClock.
let showStartTime = null; // epoch ms when ON AIR was pressed; null while off air
let showContentTime = 0; // accumulated seconds of "active" content (calls, music, etc.)
let showContentTracking = false; // whether we're in active content right now
let showClockInterval = null; // setInterval handle for the once-per-second clock tick
// Start the once-per-second wall-clock tick if it is not already running.
// Idempotent: repeated calls never create a second interval.
function initClock() {
  if (showClockInterval) return;
  showClockInterval = setInterval(updateShowClock, 1000);
  updateShowClock();
}
// Called when ON AIR is pressed: reset the content counters, record the
// start timestamp, and reveal the on-air/estimate timer readouts.
function startShowClock() {
  const timers = document.getElementById('show-timers');
  if (timers) timers.classList.remove('hidden');
  showStartTime = Date.now();
  showContentTime = 0;
  showContentTracking = false;
}
// Called when the show goes off air: clear the start timestamp and hide
// the on-air/estimate timer readouts (the wall clock keeps ticking).
function stopShowClock() {
  showStartTime = null;
  const timers = document.getElementById('show-timers');
  if (timers) timers.classList.add('hidden');
}
// One clock tick (runs every second): refresh the wall clock, and while the
// show is on air accumulate "content" seconds and update the runtime and
// estimated-final-length readouts.
function updateShowClock() {
  const clockEl = document.getElementById('clock-time');
  if (clockEl) {
    clockEl.textContent = new Date().toLocaleTimeString('en-US', { hour: 'numeric', minute: '2-digit', second: '2-digit', hour12: true });
  }

  if (!showStartTime) return;

  // "Content" = a caller is live or music is playing; count those seconds.
  const active = !!(currentCaller || isMusicPlaying);
  showContentTracking = active;
  if (active) showContentTime++;

  // Wall-clock runtime since ON AIR.
  const elapsed = Math.floor((Date.now() - showStartTime) / 1000);
  const runtimeEl = document.getElementById('clock-runtime');
  if (runtimeEl) runtimeEl.textContent = formatDuration(elapsed);

  // Estimated final length after post-production, which trims the 2-8 second
  // TTS-latency gaps: content time is kept as-is, and roughly 70% of the
  // remaining dead air is assumed to be cut.
  const silence = Math.max(0, elapsed - showContentTime);
  const finalEstimate = showContentTime + silence * 0.3;
  const estimateEl = document.getElementById('clock-estimate');
  if (estimateEl) estimateEl.textContent = formatDuration(Math.round(finalEstimate));
}
// Format a whole-second count as a clock string: "H:MM:SS" when at least an
// hour, otherwise "M:SS" (minutes unpadded, seconds always two digits).
function formatDuration(totalSec) {
  const pad2 = (n) => String(n).padStart(2, '0');
  const hours = Math.floor(totalSec / 3600);
  const minutes = Math.floor((totalSec % 3600) / 60);
  const seconds = totalSec % 60;
  return hours > 0
    ? `${hours}:${pad2(minutes)}:${pad2(seconds)}`
    : `${minutes}:${pad2(seconds)}`;
}
// --- Helpers ---
function _isTyping() {
@@ -63,6 +129,7 @@ document.addEventListener('DOMContentLoaded', async () => {
await loadSounds();
await loadSettings();
initEventListeners();
initClock();
loadVoicemails();
setInterval(loadVoicemails, 30000);
loadEmails();
@@ -137,6 +204,17 @@ function initEventListeners() {
autoScroll = e.target.checked;
});
// Log toggle (collapsed by default)
const logToggleBtn = document.getElementById('log-toggle-btn');
if (logToggleBtn) {
logToggleBtn.addEventListener('click', () => {
const logBody = document.querySelector('.log-body');
if (!logBody) return;
const collapsed = logBody.classList.toggle('collapsed');
logToggleBtn.textContent = collapsed ? 'Show \u25BC' : 'Hide \u25B2';
});
}
// Start log polling
startLogPolling();
@@ -646,12 +724,17 @@ async function hangup() {
async function wrapUp() {
if (!currentCaller) return;
try {
await fetch('/api/wrap-up', { method: 'POST' });
const res = await fetch('/api/wrap-up', { method: 'POST' });
if (!res.ok) {
const err = await res.json().catch(() => ({}));
log(`Wrap-up failed: ${err.detail || res.status}`);
return;
}
const wrapupBtn = document.getElementById('wrapup-btn');
if (wrapupBtn) wrapupBtn.classList.add('active');
log(`Wrapping up ${currentCaller.name}...`);
} catch (err) {
console.error('wrapUp error:', err);
log(`Wrap-up error: ${err.message}`);
}
}
@@ -665,7 +748,7 @@ function toggleMusic() {
// --- Server-Side Recording ---
async function startRecording() {
if (!currentCaller || isProcessing) return;
if (isProcessing) return;
try {
const res = await fetch('/api/record/start', { method: 'POST' });
@@ -708,7 +791,12 @@ async function stopRecording() {
addMessage('You', data.text);
// Chat
if (!currentCaller) {
// No active call — route voice to Devon
showStatus('Devon is thinking...');
await askDevon(data.text, { skipHostMessage: true });
} else {
// Active call — talk to caller
showStatus(`${currentCaller.name} is thinking...`);
const chatData = await safeFetch('/api/chat', {
@@ -717,10 +805,13 @@ async function stopRecording() {
body: JSON.stringify({ text: data.text })
});
// If routed to Devon, the SSE broadcast handles the message
if (chatData.routed_to !== 'devon') {
addMessage(chatData.caller, chatData.text);
}
// TTS (plays on server) - only if we have text
if (chatData.text && chatData.text.trim()) {
// TTS (plays on server) - only if we have text and not routed to Devon
if (chatData.text && chatData.text.trim() && chatData.routed_to !== 'devon') {
showStatus(`${currentCaller.name} is speaking...`);
await safeFetch('/api/tts', {
@@ -733,6 +824,7 @@ async function stopRecording() {
})
});
}
}
} catch (err) {
log('Error: ' + err.message);
@@ -763,10 +855,12 @@ async function sendTypedMessage() {
body: JSON.stringify({ text })
});
if (chatData.routed_to !== 'devon') {
addMessage(chatData.caller, chatData.text);
}
// TTS (plays on server) - only if we have text
if (chatData.text && chatData.text.trim()) {
// TTS (plays on server) - only if we have text and not routed to Devon
if (chatData.text && chatData.text.trim() && chatData.routed_to !== 'devon') {
showStatus(`${currentCaller.name} is speaking...`);
await safeFetch('/api/tts', {
@@ -983,12 +1077,12 @@ async function loadSounds() {
if (!board) return;
board.innerHTML = '';
const pinnedNames = ['cheer', 'applause', 'boo'];
const pinnedNames = ['cheer', 'applause', 'boo', 'correct'];
const pinned = [];
const rest = [];
sounds.forEach(sound => {
const lower = (sound.name || sound.file || '').toLowerCase();
const lower = ((sound.name || '') + ' ' + (sound.file || '')).toLowerCase();
if (pinnedNames.some(p => lower.includes(p))) {
pinned.push(sound);
} else {
@@ -1156,6 +1250,12 @@ function addMessage(sender, text) {
className += ' host';
} else if (sender === 'System') {
className += ' system';
// System messages are compact — no avatar, small text
div.className = className;
div.innerHTML = `<div class="msg-content system-compact">${text}</div>`;
chat.appendChild(div);
chat.scrollTop = chat.scrollHeight;
return;
} else if (sender === 'DEVON') {
className += ' devon';
} else if (sender.includes('(caller)') || sender.includes('Caller #')) {
@@ -1165,7 +1265,21 @@ function addMessage(sender, text) {
}
div.className = className;
div.innerHTML = `<strong>${sender}:</strong> ${text}`;
// Build avatar — real face images from /api/avatar/{name}
let avatarHtml = '';
if (sender === 'You') {
avatarHtml = '<img class="msg-avatar" src="/images/host-avatar.png" alt="Luke">';
} else if (sender === 'DEVON') {
avatarHtml = '<img class="msg-avatar msg-avatar-devon" src="/api/avatar/Devon" alt="Devon">';
} else if (sender === 'System') {
avatarHtml = '<span class="msg-avatar msg-avatar-system">!</span>';
} else {
const name = sender.replace(/[^a-zA-Z]/g, '') || 'Caller';
avatarHtml = `<img class="msg-avatar msg-avatar-caller" src="/api/avatar/${encodeURIComponent(name)}" alt="${name}">`;
}
div.innerHTML = `${avatarHtml}<div class="msg-content"><strong>${sender}:</strong> ${text}</div>`;
chat.appendChild(div);
chat.scrollTop = chat.scrollHeight;
}
@@ -1185,6 +1299,8 @@ function updateOnAirBtn(btn, isOn) {
btn.classList.toggle('on', isOn);
btn.classList.toggle('off', !isOn);
btn.textContent = isOn ? 'ON AIR' : 'OFF AIR';
if (isOn && !showStartTime) startShowClock();
else if (!isOn && showStartTime) stopShowClock();
}
@@ -1712,8 +1828,8 @@ async function deleteEmail(id) {
// --- Devon (Intern) ---
async function askDevon(question) {
addMessage('You', `Devon, ${question}`);
async function askDevon(question, { skipHostMessage = false } = {}) {
if (!skipHostMessage) addMessage('You', `Devon, ${question}`);
log(`[Devon] Looking up: ${question}`);
try {
const res = await safeFetch('/api/intern/ask', {
@@ -1722,7 +1838,7 @@ async function askDevon(question) {
body: JSON.stringify({ question }),
});
if (res.text) {
addMessage('DEVON', res.text);
// Don't addMessage here — the SSE broadcast_event("intern_response") handles it
log(`[Devon] Responded (tools: ${(res.sources || []).join(', ') || 'none'})`);
} else {
log('[Devon] No response');
@@ -1737,7 +1853,7 @@ async function interjectDevon() {
try {
const res = await safeFetch('/api/intern/interject', { method: 'POST' });
if (res.text) {
addMessage('DEVON', res.text);
// Don't addMessage here — SSE broadcast handles it
log('[Devon] Interjected');
} else {
log('[Devon] Nothing to add');
@@ -1772,9 +1888,7 @@ function showDevonSuggestion(text) {
async function playDevonSuggestion() {
try {
const res = await safeFetch('/api/intern/suggestion/play', { method: 'POST' });
if (res.text) {
addMessage('DEVON', res.text);
}
// Don't addMessage here — SSE broadcast handles it
document.getElementById('devon-suggestion')?.classList.add('hidden');
log('[Devon] Played suggestion');
} catch (err) {

View File

@@ -0,0 +1,920 @@
-- Post-Production Script for REAPER
-- Phase 1: Strip long silences from DIALOG regions (all tracks except music)
-- Phase 2: Normalize AD/IDENT/music volume to match dialog
-- Phase 3: Trim music to length of longest voice track with fade-out
-- Phase 4: Mute music during AD/IDENT regions with fade in/out
---------------------------------------------------------------------------
-- SETTINGS
---------------------------------------------------------------------------
local SILENCE_DB = -30 -- dBFS — anything below this is "silence"
local MIN_SILENCE_SEC = 6.0 -- only remove silences longer than this
local MIN_VOICE_SEC = 0.3 -- ignore non-silent bursts shorter than this (filters transients)
local KEEP_PAD_SEC = 0.5 -- leave this much silence on each side of a cut
local BLOCK_SEC = 0.1 -- analysis block size (100ms)
local SAMPLE_RATE = 48000 -- rate used for accessor reads/BLOCK_SAMPLES — TODO confirm it matches the session
local CHECK_TRACKS = {1, 2, 3} -- 1-indexed: Host, Live Caller, AI Caller
local IDENTS_TRACK = 5 -- 1-indexed: Idents track
local ADS_TRACK = 6 -- 1-indexed: Ads track
local MUSIC_TRACK = 7 -- 1-indexed: Music track
local MUSIC_FADE_SEC = 2.0 -- fade duration for music in/out around ads/idents
local YIELD_INTERVAL = 200 -- yield to REAPER every N blocks (~20s of audio)
---------------------------------------------------------------------------
-- Derived constants
local BLOCK_SAMPLES = math.floor(SAMPLE_RATE * BLOCK_SEC) -- samples per analysis block
local THRESHOLD = 10 ^ (SILENCE_DB / 20) -- SILENCE_DB converted to linear amplitude
local MIN_VOICE_BLOCKS = math.ceil(MIN_VOICE_SEC / BLOCK_SEC) -- consecutive loud blocks needed to end a silence
--- Write one prefixed line to the REAPER console.
-- @tparam string msg message text (newline is appended)
local function log(msg)
  reaper.ShowConsoleMsg(string.format("[PostProd] %s\n", msg))
end
---------------------------------------------------------------------------
-- Progress window (gfx)
---------------------------------------------------------------------------
-- Shared progress state: written by the worker phases between coroutine
-- yields, rendered by progress_draw() from the deferred runner loop.
local progress_phase = ""
local progress_pct = 0
local progress_detail = ""
-- Open the 420x60 gfx progress window and set its font.
local function progress_init()
gfx.init("Post-Production", 420, 60)
gfx.setfont(1, "Arial", 14)
end
-- Repaint the progress window (phase label, detail text, fill bar).
-- Returns false when the window has been closed (gfx.getchar() < 0);
-- note: callers currently ignore this return value.
local function progress_draw()
if gfx.getchar() < 0 then return false end
gfx.set(0.12, 0.12, 0.12)
gfx.rect(0, 0, 420, 60, true)
-- Label
gfx.set(1, 1, 1)
gfx.x = 10; gfx.y = 8
gfx.drawstr(progress_phase)
gfx.x = 300; gfx.y = 8
gfx.drawstr(progress_detail)
-- Bar background
gfx.set(0.25, 0.25, 0.25)
gfx.rect(10, 32, 400, 18, true)
-- Bar fill (clamped to the 400px bar width)
gfx.set(0.2, 0.7, 0.3)
local fill = math.min(math.floor(400 * progress_pct), 400)
if fill > 0 then gfx.rect(10, 32, fill, 18, true) end
gfx.update()
return true
end
-- Close the progress window.
local function progress_close()
gfx.quit()
end
---------------------------------------------------------------------------
-- Region helpers
---------------------------------------------------------------------------
--- Collect all project regions whose name matches a Lua pattern.
-- Markers (non-regions) are ignored. Result entries are
-- {start_pos, end_pos, name} tables sorted by start position.
local function get_regions_by_type(type_pattern)
  local found = {}
  local _, marker_count, region_count = reaper.CountProjectMarkers(0)
  for i = 0, marker_count + region_count - 1 do
    local _, is_region, pos, rgn_end, name = reaper.EnumProjectMarkers(i)
    if is_region and name and name:match(type_pattern) then
      found[#found + 1] = {start_pos = pos, end_pos = rgn_end, name = name}
    end
  end
  table.sort(found, function(a, b) return a.start_pos < b.start_pos end)
  return found
end
--- Collapse overlapping or touching regions into disjoint spans.
-- Sorts the input list in place; returns a new list whose entries are
-- renamed "MERGED n". Lists with zero or one entry are returned unchanged.
local function merge_regions(regions)
  if #regions <= 1 then return regions end
  table.sort(regions, function(a, b) return a.start_pos < b.start_pos end)
  local merged = {}
  for _, rgn in ipairs(regions) do
    local last = merged[#merged]
    if last and rgn.start_pos <= last.end_pos then
      -- Overlap/touch: grow the previous span instead of adding a new one.
      if rgn.end_pos > last.end_pos then last.end_pos = rgn.end_pos end
    else
      merged[#merged + 1] = {
        start_pos = rgn.start_pos,
        end_pos = rgn.end_pos,
        name = "MERGED " .. (#merged + 1),
      }
    end
  end
  return merged
end
--- Re-position every project marker/region after spans have been cut out.
-- Each absolute time moves left by the full length of removals entirely
-- before it, plus the overlapped portion of any removal it falls inside.
local function shift_regions(removals)
  -- How far an absolute position moves left once all removals are applied.
  local function shift_for(pos)
    local shift = 0
    for _, r in ipairs(removals) do
      if r.end_pos <= pos then
        shift = shift + (r.end_pos - r.start_pos)
      elseif r.start_pos < pos then
        shift = shift + (pos - r.start_pos)
      end
    end
    return shift
  end
  -- Snapshot all markers/regions first: repositioning while enumerating
  -- would interact badly with REAPER's marker indexing.
  local snapshot = {}
  local _, num_markers, num_regions = reaper.CountProjectMarkers(0)
  for i = 0, num_markers + num_regions - 1 do
    local ok, is_region, pos, rgnend, name, idx, color = reaper.EnumProjectMarkers3(0, i)
    if ok then
      snapshot[#snapshot + 1] = {
        is_region = is_region, pos = pos, rgnend = rgnend,
        name = name, idx = idx, color = color,
      }
    end
  end
  for _, m in ipairs(snapshot) do
    local new_pos = m.pos - shift_for(m.pos)
    if m.is_region then
      reaper.SetProjectMarker3(0, m.idx, true, new_pos, m.rgnend - shift_for(m.rgnend), m.name, m.color)
    else
      reaper.SetProjectMarker3(0, m.idx, false, new_pos, 0, m.name, m.color)
    end
  end
end
--- Return the media item on `track` covering time `pos`, or nil.
-- The containment test is half-open: [item_start, item_start + length).
local function find_item_at(track, pos)
  local count = reaper.CountTrackMediaItems(track)
  for i = 0, count - 1 do
    local item = reaper.GetTrackMediaItem(track, i)
    local start = reaper.GetMediaItemInfo_Value(item, "D_POSITION")
    local stop = start + reaper.GetMediaItemInfo_Value(item, "D_LENGTH")
    if pos >= start and pos < stop then return item end
  end
  return nil
end
---------------------------------------------------------------------------
-- Phase 1: Silence detection and removal
---------------------------------------------------------------------------
-- Read audio directly from WAV files (bypasses REAPER accessor — immune to undo issues)
--- Parse a RIFF/WAVE header and locate the sample data chunk.
-- Returns a descriptor table on success:
--   audio_fmt        WAVE format tag (1 = integer PCM, 3 = IEEE float)
--   channels         channel count
--   sample_rate      samples per second
--   bps              bits per sample
--   data_offset      byte offset of the first sample frame in the file
--   data_size        size of the data chunk in bytes
--   bytes_per_sample, frame_size, filepath
-- Returns nil (never raises) for unopenable, non-WAV, or truncated files.
-- Fixes vs. previous version: RIFF chunks are 16-bit word aligned, so an
-- odd-sized chunk is followed by one pad byte that must be skipped
-- (previously any odd-sized LIST/JUNK/bext chunk before "data" derailed the
-- parse), and short reads now fail cleanly instead of crashing string.unpack.
local function parse_wav_header(filepath)
  local f = io.open(filepath, "rb")
  if not f then return nil end
  local function fail() f:close(); return nil end
  -- Read exactly n bytes or report failure (nil on EOF/short read).
  local function read_exact(n)
    local s = f:read(n)
    if not s or #s < n then return nil end
    return s
  end
  if read_exact(4) ~= "RIFF" then return fail() end
  if not read_exact(4) then return fail() end -- total RIFF size (unused)
  if read_exact(4) ~= "WAVE" then return fail() end
  local fmt_info = nil
  while true do
    local id = read_exact(4)
    local size_raw = id and read_exact(4)
    if not size_raw then return fail() end
    local size = string.unpack("<I4", size_raw)
    if id == "fmt " then
      if size < 16 then return fail() end
      local body = read_exact(16)
      if not body then return fail() end
      local audio_fmt, channels, sr, _, _, bps = string.unpack("<I2I2I4I4I2I2", body)
      -- Skip any fmt extension (e.g. WAVE_FORMAT_EXTENSIBLE extra fields).
      if size > 16 and not read_exact(size - 16) then return fail() end
      fmt_info = {audio_fmt = audio_fmt, channels = channels, sample_rate = sr, bps = bps}
    elseif id == "data" then
      -- "data" before "fmt " is malformed — bail out.
      if not fmt_info then return fail() end
      local data_offset = f:seek()
      f:close()
      fmt_info.data_offset = data_offset
      fmt_info.data_size = size
      fmt_info.filepath = filepath
      fmt_info.bytes_per_sample = fmt_info.bps / 8
      fmt_info.frame_size = fmt_info.channels * fmt_info.bytes_per_sample
      return fmt_info
    else
      -- Unrecognized chunk: skip its payload.
      if size > 0 and not read_exact(size) then return fail() end
    end
    -- RIFF word alignment: odd-sized chunks carry a single pad byte.
    if size % 2 == 1 and not f:read(1) then return fail() end
  end
end
--- Open direct WAV file readers for every item on a track (1-based index).
-- Reads audio straight from disk, bypassing REAPER's accessor layer.
-- Returns nil when the track is missing/empty or no item could be opened,
-- otherwise {segments, item_pos = earliest start, item_end = latest end}
-- with segments sorted by timeline position. Callers must release the open
-- handles via destroy_track_audio().
local function get_track_audio(track_idx_1based)
  local track = reaper.GetTrack(0, track_idx_1based - 1)
  if not track then return nil end
  local item_count = reaper.CountTrackMediaItems(track)
  if item_count == 0 then return nil end
  local segments = {}
  for i = 0, item_count - 1 do
    local item = reaper.GetTrackMediaItem(track, i)
    local take = reaper.GetActiveTake(item)
    if take then
      local filepath = reaper.GetMediaSourceFileName(reaper.GetMediaItemTake_Source(take))
      local wav = parse_wav_header(filepath)
      if not wav then
        log(" WARNING: Could not parse WAV header for: " .. filepath)
      else
        local fh = io.open(filepath, "rb")
        if fh then
          local pos = reaper.GetMediaItemInfo_Value(item, "D_POSITION")
          local len = reaper.GetMediaItemInfo_Value(item, "D_LENGTH")
          segments[#segments + 1] = {
            fh = fh,
            wav = wav,
            item_pos = pos,
            item_end = pos + len,
            take_offset = reaper.GetMediaItemTakeInfo_Value(take, "D_STARTOFFS"),
          }
        end
      end
    end
  end
  if #segments == 0 then return nil end
  -- Sort by position so lookups can walk segments in timeline order.
  table.sort(segments, function(a, b) return a.item_pos < b.item_pos end)
  return {
    segments = segments,
    item_pos = segments[1].item_pos,
    item_end = segments[#segments].item_end,
  }
end
--- Close and clear every open file handle held by a get_track_audio result.
local function destroy_track_audio(ta)
  for _, segment in ipairs(ta.segments) do
    local handle = segment.fh
    if handle then
      handle:close()
      segment.fh = nil
    end
  end
end
--- Measure one analysis block of a segment at `project_time`.
-- Decodes only channel 0 of each frame, reading raw bytes from the open
-- WAV handle. Supported encodings: 32-bit IEEE float (fmt 3) and 16/24/32-bit
-- integer PCM (fmt 1); anything else decodes as 0, i.e. silence.
-- NOTE(review): fmt 3 is assumed to be 32-bit float; a 64-bit float WAV
-- would mis-decode — confirm the upstream recorder's output format.
-- Returns peak (absolute max sample) and sum of squared samples, or 0, 0
-- when the block falls outside the segment's readable data range.
local function read_block_peak_rms_segment(seg, project_time)
  local wav = seg.wav
  local source_time = project_time - seg.item_pos + seg.take_offset
  if source_time < 0 then return 0, 0 end
  local first_sample = math.floor(source_time * wav.sample_rate)
  local start_byte = wav.data_offset + first_sample * wav.frame_size
  local span = BLOCK_SAMPLES * wav.frame_size
  if start_byte + span > wav.data_offset + wav.data_size then
    return 0, 0
  end
  seg.fh:seek("set", start_byte)
  local raw = seg.fh:read(span)
  if not raw or #raw < span then return 0, 0 end
  local peak, sum_sq = 0, 0
  local bps = wav.bytes_per_sample
  local is_float = wav.audio_fmt == 3
  for n = 0, BLOCK_SAMPLES - 1 do
    local base = n * wav.frame_size
    local sample = 0
    if is_float then
      sample = string.unpack("<f", raw, base + 1)
    elseif bps == 3 then
      -- 24-bit little-endian two's complement, assembled by hand.
      local b1, b2, b3 = string.byte(raw, base + 1, base + 3)
      local val = b1 + b2 * 256 + b3 * 65536
      if val >= 8388608 then val = val - 16777216 end
      sample = val / 8388608.0
    elseif bps == 2 then
      sample = string.unpack("<i2", raw, base + 1) / 32768.0
    elseif bps == 4 and wav.audio_fmt == 1 then
      sample = string.unpack("<i4", raw, base + 1) / 2147483648.0
    end
    sum_sq = sum_sq + sample * sample
    local mag = math.abs(sample)
    if mag > peak then peak = mag end
  end
  return peak, sum_sq
end
--- Peak and sum-of-squares for the block at `project_time` on one track.
-- Delegates to the segment that covers that time; 0, 0 when none does
-- (gaps between items read as silence).
local function read_block_peak_rms(ta, project_time)
  for _, seg in ipairs(ta.segments) do
    if seg.item_pos <= project_time and project_time < seg.item_end then
      return read_block_peak_rms_segment(seg, project_time)
    end
  end
  return 0, 0
end
-- find_silences: detects silences and accumulates RMS data
-- Yields periodically via coroutine for UI responsiveness
-- progress_fn(t): called before each yield with current position
-- Scan [region.start_pos, region.end_pos) in BLOCK_SEC steps and return:
--   silences      list of {start_pos, end_pos, duration} spans where every
--                 checked track stayed below THRESHOLD for >= MIN_SILENCE_SEC
--   total_blocks  number of blocks analyzed
--   silent_blocks number of blocks judged silent
-- A silence only ends after MIN_VOICE_BLOCKS consecutive loud blocks, so
-- sub-MIN_VOICE_SEC transients (clicks, breaths) don't split a silence.
-- When rms_acc {sum_sq, count} is given, loud-block energy is accumulated
-- into it for the later dialog-RMS measurement.
-- Yields (coroutine) every YIELD_INTERVAL blocks, calling progress_fn(t) first.
local function find_silences(region, track_audios, rms_acc, progress_fn)
local silences = {}
local in_silence = false
local silence_start = 0
local voice_run = 0
local t = region.start_pos
local total_blocks = 0
local silent_blocks = 0
local yield_count = 0
while t < region.end_pos do
-- A block is silent only if *every* track is below THRESHOLD; keep the
-- loudest track's peak and that track's sum-of-squares for the RMS average.
local best_peak = 0
local best_sum = 0
for _, ta in ipairs(track_audios) do
local peak, sum_sq = read_block_peak_rms(ta, t)
if peak > best_peak then
best_peak = peak
best_sum = sum_sq
end
end
local all_silent = best_peak < THRESHOLD
total_blocks = total_blocks + 1
if all_silent then silent_blocks = silent_blocks + 1 end
if not all_silent and rms_acc then
rms_acc.sum_sq = rms_acc.sum_sq + best_sum
rms_acc.count = rms_acc.count + BLOCK_SAMPLES
end
if in_silence then
if all_silent then
voice_run = 0
else
voice_run = voice_run + 1
if voice_run >= MIN_VOICE_BLOCKS then
-- Voice confirmed: the silence ended where this voice run began.
local voice_start = t - (voice_run - 1) * BLOCK_SEC
local dur = voice_start - silence_start
if dur >= MIN_SILENCE_SEC then
table.insert(silences, {start_pos = silence_start, end_pos = voice_start, duration = dur})
end
in_silence = false
voice_run = 0
end
end
else
if all_silent then
in_silence = true
silence_start = t
voice_run = 0
end
end
t = t + BLOCK_SEC
-- Yield periodically so REAPER stays responsive
yield_count = yield_count + 1
if yield_count >= YIELD_INTERVAL then
yield_count = 0
if progress_fn then progress_fn(t) end
coroutine.yield()
end
end
-- Flush a silence that is still open at the end of the region.
if in_silence then
local dur = region.end_pos - silence_start
if dur >= MIN_SILENCE_SEC then
table.insert(silences, {start_pos = silence_start, end_pos = region.end_pos, duration = dur})
end
end
return silences, total_blocks, silent_blocks
end
-- Phase 1: detect long silences inside the (merged) DIALOG regions and
-- ripple-delete them from every track except MUSIC_TRACK, shifting later
-- items and all markers/regions left. Prompts the user before cutting.
-- Returns:
--   ok             false when no voice audio exists or the user cancels
--   dialog_rms_db  loud-block dialog RMS in dBFS (nil when unmeasurable)
-- Must run inside the worker coroutine (analysis and removal both yield).
local function phase1_strip_silence(dialog_regions)
dialog_regions = merge_regions(dialog_regions)
log("Phase 1: " .. #dialog_regions .. " merged DIALOG region(s)")
-- Open direct WAV readers for each voice track
local track_audios = {}
local tracks_loaded = 0
for _, tidx in ipairs(CHECK_TRACKS) do
local ta = get_track_audio(tidx)
if ta then
table.insert(track_audios, ta)
tracks_loaded = tracks_loaded + 1
local first_wav = ta.segments[1].wav
local fmt = first_wav.audio_fmt == 3 and "float" or (first_wav.bps .. "bit")
log(" Track " .. tidx .. ": " .. #ta.segments .. " item(s), " .. fmt .. " " .. first_wav.sample_rate .. "Hz (pos=" .. string.format("%.1f", ta.item_pos) .. " end=" .. string.format("%.1f", ta.item_end) .. ")")
else
log(" WARNING: Track " .. tidx .. " has no audio items — silence detection will NOT check this track")
end
end
if tracks_loaded == 0 then
log("Phase 1: No audio found on voice tracks — skipping")
return false, 0
end
if tracks_loaded < #CHECK_TRACKS then
log(" *** Only " .. tracks_loaded .. "/" .. #CHECK_TRACKS .. " voice tracks have audio — silence may be over-detected ***")
end
-- Load AD/IDENT regions so we can protect them from silence removal
local protected_regions = {}
for _, r in ipairs(get_regions_by_type("^AD%s+%d+$")) do table.insert(protected_regions, r) end
for _, r in ipairs(get_regions_by_type("^IDENT%s+%d+$")) do table.insert(protected_regions, r) end
table.sort(protected_regions, function(a, b) return a.start_pos < b.start_pos end)
if #protected_regions > 0 then
log(" Protecting " .. #protected_regions .. " AD/IDENT region(s) from silence removal")
end
log("Phase 1: Analyzing using " .. tracks_loaded .. "/" .. #CHECK_TRACKS .. " voice tracks")
log(" threshold=" .. SILENCE_DB .. "dB, min_silence=" .. MIN_SILENCE_SEC .. "s, pad=" .. KEEP_PAD_SEC .. "s")
-- Calculate total duration for progress tracking
local total_duration = 0
for _, rgn in ipairs(dialog_regions) do
total_duration = total_duration + (rgn.end_pos - rgn.start_pos)
end
local processed_duration = 0
-- Accumulates loud-block sum-of-squares for the dialog RMS measurement
local rms_acc = {sum_sq = 0, count = 0}
local removals = {}
local total_blocks = 0
local silent_blocks = 0
for ri, rgn in ipairs(dialog_regions) do
local rgn_dur = rgn.end_pos - rgn.start_pos
local function update_progress(t)
local rgn_progress = (t - rgn.start_pos) / rgn_dur
progress_pct = (processed_duration + rgn_progress * rgn_dur) / total_duration
progress_phase = "Phase 1: Scanning"
progress_detail = string.format("Region %d/%d", ri, #dialog_regions)
end
local silences, rgn_total, rgn_silent = find_silences(rgn, track_audios, rms_acc, update_progress)
processed_duration = processed_duration + rgn_dur
total_blocks = total_blocks + rgn_total
silent_blocks = silent_blocks + rgn_silent
log(" " .. rgn.name .. ": " .. rgn_total .. " blocks, " .. rgn_silent .. " silent (" .. string.format("%.0f", rgn_silent/math.max(rgn_total,1)*100) .. "%)")
for _, s in ipairs(silences) do
-- Keep KEEP_PAD_SEC of breathing room on each side of the cut
local rm_start = s.start_pos + KEEP_PAD_SEC
local rm_end = s.end_pos - KEEP_PAD_SEC
if rm_end > rm_start + 0.05 then
-- Check if this silence overlaps with any AD/IDENT region
local protected = false
for _, pr in ipairs(protected_regions) do
if rm_start < pr.end_pos and rm_end > pr.start_pos then
protected = true
log(" SKIP " .. string.format("%.1f", rm_end - rm_start) .. "s at " .. string.format("%.1f", s.start_pos) .. "-" .. string.format("%.1f", s.end_pos) .. " (overlaps " .. pr.name .. ")")
break
end
end
if not protected then
table.insert(removals, {start_pos = rm_start, end_pos = rm_end})
log(" remove " .. string.format("%.1f", rm_end - rm_start) .. "s at " .. string.format("%.1f", s.start_pos) .. "-" .. string.format("%.1f", s.end_pos))
end
end
end
end
-- Close the raw WAV file handles before any UI prompt
for _, ta in ipairs(track_audios) do
destroy_track_audio(ta)
end
log("Phase 1: Total " .. total_blocks .. " blocks, " .. silent_blocks .. " silent (" .. string.format("%.0f", silent_blocks/math.max(total_blocks,1)*100) .. "%)")
local dialog_rms_db = nil
if rms_acc.count > 0 then
local rms = math.sqrt(rms_acc.sum_sq / rms_acc.count)
if rms > 0 then dialog_rms_db = 20 * math.log(rms, 10) end
end
if #removals == 0 then
log("Phase 1: No long silences found")
return true, dialog_rms_db
end
local total_removed = 0
for _, r in ipairs(removals) do
total_removed = total_removed + (r.end_pos - r.start_pos)
end
local msg = string.format(
"Phase 1: Found %d silence(s) totaling %.1fs to remove.\n\nProceed?",
#removals, total_removed
)
-- ShowMessageBox type 1 = OK/Cancel; return 1 = user pressed OK
if reaper.ShowMessageBox(msg, "Strip Silence", 1) ~= 1 then return false end
-- Modification phase — prevent UI refresh for performance, but yield for progress
progress_phase = "Phase 1: Removing"
reaper.PreventUIRefresh(1)
-- Process removals right-to-left so earlier cut positions stay valid
for i = #removals, 1, -1 do
local r = removals[i]
local remove_len = r.end_pos - r.start_pos
for t = 0, reaper.CountTracks(0) - 1 do
if (t + 1) == MUSIC_TRACK then goto next_track end
local track = reaper.GetTrack(0, t)
local item = find_item_at(track, r.start_pos)
if item then
-- Split around the silent span and delete the middle piece
local right = reaper.SplitMediaItem(item, r.start_pos)
if right then
reaper.SplitMediaItem(right, r.end_pos)
reaper.DeleteTrackMediaItem(track, right)
end
end
-- Ripple: shift every later item on this track left by the cut length
for j = 0, reaper.CountTrackMediaItems(track) - 1 do
local shift_item = reaper.GetTrackMediaItem(track, j)
local pos = reaper.GetMediaItemInfo_Value(shift_item, "D_POSITION")
if pos >= r.start_pos then
reaper.SetMediaItemInfo_Value(shift_item, "D_POSITION", pos - remove_len)
end
end
::next_track::
end
-- Yield every 5 removals to update progress
if i % 5 == 0 then
progress_pct = (#removals - i) / #removals
progress_detail = string.format("%d/%d cuts", #removals - i, #removals)
reaper.PreventUIRefresh(-1)
coroutine.yield()
reaper.PreventUIRefresh(1)
end
end
reaper.PreventUIRefresh(-1)
-- Move all project markers/regions to match the rippled items
shift_regions(removals)
log("Phase 1: Removed " .. #removals .. " silence(s), " .. string.format("%.1f", total_removed) .. "s total")
return true, dialog_rms_db
end
---------------------------------------------------------------------------
-- Phase 2: Normalize AD/IDENT volume to match dialog
---------------------------------------------------------------------------
-- Split out the portion of a track's items covered by each region, measure
-- its RMS via a take audio accessor, and scale its item volume (D_VOL) so
-- the segment's RMS matches target_db. Items are split at region edges
-- (0.01s tolerance) so the gain only affects audio inside the region.
local function normalize_track_regions(track_idx, regions, target_db)
local track = reaper.GetTrack(0, track_idx - 1)
if not track or reaper.CountTrackMediaItems(track) == 0 then return end
for _, rgn in ipairs(regions) do
local item = find_item_at(track, rgn.start_pos)
if not item then goto next_region end
local item_start = reaper.GetMediaItemInfo_Value(item, "D_POSITION")
local segment = item
-- Split off any audio before the region; keep the right half
if item_start < rgn.start_pos - 0.01 then
segment = reaper.SplitMediaItem(item, rgn.start_pos)
if not segment then goto next_region end
end
local seg_end = reaper.GetMediaItemInfo_Value(segment, "D_POSITION")
+ reaper.GetMediaItemInfo_Value(segment, "D_LENGTH")
-- Split off any audio after the region
if rgn.end_pos < seg_end - 0.01 then
reaper.SplitMediaItem(segment, rgn.end_pos)
end
local take = reaper.GetActiveTake(segment)
if not take then goto next_region end
local seg_pos = reaper.GetMediaItemInfo_Value(segment, "D_POSITION")
local seg_len = reaper.GetMediaItemInfo_Value(segment, "D_LENGTH")
local seg_offset = reaper.GetMediaItemTakeInfo_Value(take, "D_STARTOFFS")
local accessor = reaper.CreateTakeAudioAccessor(take)
local sum_sq = 0
local count = 0
local t = seg_pos
-- NOTE(review): the final block may extend past the segment end; the
-- accessor zero-fills there, slightly lowering the measured RMS —
-- confirm this bias is acceptable.
while t < seg_pos + seg_len do
local source_time = t - seg_pos + seg_offset
local buf = reaper.new_array(BLOCK_SAMPLES)
reaper.GetAudioAccessorSamples(accessor, SAMPLE_RATE, 1, source_time, BLOCK_SAMPLES, buf)
for i = 1, BLOCK_SAMPLES do
sum_sq = sum_sq + buf[i] * buf[i]
end
count = count + BLOCK_SAMPLES
t = t + BLOCK_SEC
end
reaper.DestroyAudioAccessor(accessor)
if count > 0 then
local item_rms = math.sqrt(sum_sq / count)
if item_rms > 0 then
local item_db = 20 * math.log(item_rms, 10)
local gain_db = target_db - item_db
local gain_linear = 10 ^ (gain_db / 20)
-- Multiply onto the existing D_VOL so manual trims are preserved
local current_vol = reaper.GetMediaItemInfo_Value(segment, "D_VOL")
reaper.SetMediaItemInfo_Value(segment, "D_VOL", current_vol * gain_linear)
log(" " .. rgn.name .. ": " .. string.format("%+.1f", gain_db) .. "dB adjustment")
end
end
::next_region::
end
end
-- Measure the music bed's RMS over the DIALOG regions — counting only blocks
-- whose peak reaches THRESHOLD, so gaps between songs don't drag the average
-- down — then apply one uniform gain to every music item so the music's RMS
-- matches target_db.
local function normalize_music_track(dialog_regions, target_db)
local track = reaper.GetTrack(0, MUSIC_TRACK - 1)
if not track or reaper.CountTrackMediaItems(track) == 0 then return end
local sum_sq = 0
local count = 0
for _, rgn in ipairs(dialog_regions) do
for i = 0, reaper.CountTrackMediaItems(track) - 1 do
local item = reaper.GetTrackMediaItem(track, i)
local take = reaper.GetActiveTake(item)
if not take then goto next_item end
local item_pos = reaper.GetMediaItemInfo_Value(item, "D_POSITION")
local item_len = reaper.GetMediaItemInfo_Value(item, "D_LENGTH")
local item_end = item_pos + item_len
local take_offset = reaper.GetMediaItemTakeInfo_Value(take, "D_STARTOFFS")
-- Overlap of this item with the current DIALOG region
local mstart = math.max(item_pos, rgn.start_pos)
local mend = math.min(item_end, rgn.end_pos)
if mstart >= mend then goto next_item end
local accessor = reaper.CreateTakeAudioAccessor(take)
local t = mstart
while t < mend do
local source_time = t - item_pos + take_offset
local buf = reaper.new_array(BLOCK_SAMPLES)
reaper.GetAudioAccessorSamples(accessor, SAMPLE_RATE, 1, source_time, BLOCK_SAMPLES, buf)
local peak = 0
local block_sum = 0
for j = 1, BLOCK_SAMPLES do
local v = buf[j]
block_sum = block_sum + v * v
local av = math.abs(v)
if av > peak then peak = av end
end
-- Only audible blocks count toward the music average
if peak >= THRESHOLD then
sum_sq = sum_sq + block_sum
count = count + BLOCK_SAMPLES
end
t = t + BLOCK_SEC
end
reaper.DestroyAudioAccessor(accessor)
::next_item::
end
end
if count == 0 then
log(" Music: no audio detected — skipping")
return
end
local music_rms = math.sqrt(sum_sq / count)
if music_rms > 0 then
local music_db = 20 * math.log(music_rms, 10)
local gain_db = target_db - music_db
local gain_linear = 10 ^ (gain_db / 20)
-- Apply the same gain to every music item via D_VOL
for i = 0, reaper.CountTrackMediaItems(track) - 1 do
local item = reaper.GetTrackMediaItem(track, i)
local current_vol = reaper.GetMediaItemInfo_Value(item, "D_VOL")
reaper.SetMediaItemInfo_Value(item, "D_VOL", current_vol * gain_linear)
end
log(" Music: " .. string.format("%+.1f", gain_db) .. "dB adjustment")
end
end
--- Phase 2: bring AD/IDENT segments and the music bed to the dialog's
-- measured RMS level. Yields between steps so the progress window stays
-- responsive. Skips everything when dialog_rms_db is nil.
local function phase2_normalize(dialog_regions, ad_regions, ident_regions, dialog_rms_db)
  progress_phase = "Phase 2: Normalizing"
  progress_pct = 0
  progress_detail = ""
  coroutine.yield()
  if not dialog_rms_db then
    log("Phase 2: Could not measure dialog loudness — skipping")
    return
  end
  log("Phase 2: Dialog RMS = " .. string.format("%.1f", dialog_rms_db) .. " dBFS")
  if #ad_regions > 0 then
    progress_detail = "Ads"
    coroutine.yield()
    log("Phase 2: Normalizing " .. #ad_regions .. " AD region(s)...")
    normalize_track_regions(ADS_TRACK, ad_regions, dialog_rms_db)
  end
  if #ident_regions > 0 then
    progress_detail = "Idents"
    progress_pct = 0.33
    coroutine.yield()
    log("Phase 2: Normalizing " .. #ident_regions .. " IDENT region(s)...")
    normalize_track_regions(IDENTS_TRACK, ident_regions, dialog_rms_db)
  end
  progress_detail = "Music"
  progress_pct = 0.66
  coroutine.yield()
  log("Phase 2: Normalizing music track...")
  normalize_music_track(dialog_regions, dialog_rms_db)
  progress_pct = 1.0
end
---------------------------------------------------------------------------
-- Phase 3: Trim music to voice length
-- Phase 4: Mute music during AD/IDENT regions with fades
---------------------------------------------------------------------------
--- Phase 3: end the music bed together with the last voice item.
-- Shortens the music item covering the cutoff, applies a MUSIC_FADE_SEC
-- fade-out, and deletes any music items that start after the cutoff.
local function phase3_trim_music()
  progress_phase = "Phase 3: Trimming music"
  progress_pct = 0
  progress_detail = ""
  coroutine.yield()
  local music_track = reaper.GetTrack(0, MUSIC_TRACK - 1)
  if not music_track then return end
  -- Latest item end time across all voice tracks.
  local last_end = 0
  for _, tidx in ipairs(CHECK_TRACKS) do
    local tr = reaper.GetTrack(0, tidx - 1)
    local n = tr and reaper.CountTrackMediaItems(tr) or 0
    if n > 0 then
      local last_item = reaper.GetTrackMediaItem(tr, n - 1)
      local stop = reaper.GetMediaItemInfo_Value(last_item, "D_POSITION")
        + reaper.GetMediaItemInfo_Value(last_item, "D_LENGTH")
      if stop > last_end then last_end = stop end
    end
  end
  if last_end == 0 then return end
  -- Music item covering the cutoff, falling back to the last music item.
  local item = find_item_at(music_track, last_end - 0.01)
  if not item then
    local n = reaper.CountTrackMediaItems(music_track)
    if n > 0 then item = reaper.GetTrackMediaItem(music_track, n - 1) end
  end
  if not item then
    log("Phase 3: No music item to trim")
    return
  end
  local item_start = reaper.GetMediaItemInfo_Value(item, "D_POSITION")
  local item_end = item_start + reaper.GetMediaItemInfo_Value(item, "D_LENGTH")
  if last_end < item_end then
    reaper.SetMediaItemInfo_Value(item, "D_LENGTH", last_end - item_start)
    reaper.SetMediaItemInfo_Value(item, "D_FADEOUTLEN", MUSIC_FADE_SEC)
    log("Phase 3: Trimmed music at " .. string.format("%.1f", last_end) .. "s with " .. MUSIC_FADE_SEC .. "s fade-out")
    -- Delete any music items starting at/after the new end; reverse scan so
    -- deletions don't shift the indices still to be visited.
    for i = reaper.CountTrackMediaItems(music_track) - 1, 0, -1 do
      local extra = reaper.GetTrackMediaItem(music_track, i)
      if reaper.GetMediaItemInfo_Value(extra, "D_POSITION") >= last_end then
        reaper.DeleteTrackMediaItem(music_track, extra)
      end
    end
  else
    log("Phase 3: Music already ends before last voice audio — adding fade-out")
    reaper.SetMediaItemInfo_Value(item, "D_FADEOUTLEN", MUSIC_FADE_SEC)
  end
  progress_pct = 1.0
end
--- Phase 4: duck the music bed under each AD/IDENT region.
-- For every region: split the music MUSIC_FADE_SEC before the region starts,
-- fade out the left piece, mute the middle piece, split again at the region
-- end, and fade in the right piece.
-- Fix: the progress bar is now updated for every region. Previously the
-- `goto continue` taken when no music item was found jumped past the
-- progress_pct/progress_detail updates, so the bar stalled on skipped regions.
local function phase4_music_fades(ad_ident_regions)
  progress_phase = "Phase 4: Music fades"
  progress_pct = 0
  progress_detail = ""
  coroutine.yield()
  local music_track = reaper.GetTrack(0, MUSIC_TRACK - 1)
  if not music_track or reaper.CountTrackMediaItems(music_track) == 0 then
    log("Phase 4: No music track/items found — skipping")
    return
  end
  log("Phase 4: Processing " .. #ad_ident_regions .. " AD/IDENT region(s)...")
  for ri, rgn in ipairs(ad_ident_regions) do
    -- Start the fade-out before the region itself begins.
    local fade_point = rgn.start_pos - MUSIC_FADE_SEC
    local item = find_item_at(music_track, math.max(fade_point, 0))
      or find_item_at(music_track, rgn.start_pos)
    if not item then
      log(" " .. rgn.name .. ": no music item found — skipping")
    else
      local item_start = reaper.GetMediaItemInfo_Value(item, "D_POSITION")
      -- Never split at (or before) the item's own start.
      local split_pos = math.max(fade_point, item_start + 0.01)
      local mid = reaper.SplitMediaItem(item, split_pos)
      if mid then
        reaper.SetMediaItemInfo_Value(item, "D_FADEOUTLEN", MUSIC_FADE_SEC)
        local after = reaper.SplitMediaItem(mid, rgn.end_pos)
        reaper.SetMediaItemInfo_Value(mid, "B_MUTE", 1)
        if after then
          reaper.SetMediaItemInfo_Value(after, "D_FADEINLEN", MUSIC_FADE_SEC)
        end
        log(" " .. rgn.name .. ": muted music, fade out/in (" .. MUSIC_FADE_SEC .. "s)")
      end
    end
    -- Update progress unconditionally — including for skipped regions.
    progress_pct = ri / #ad_ident_regions
    progress_detail = string.format("%d/%d", ri, #ad_ident_regions)
  end
end
---------------------------------------------------------------------------
-- Main (coroutine-based for UI responsiveness)
---------------------------------------------------------------------------
-- Worker entry point: runs all four phases inside the runner coroutine.
-- Phase 1 may be cancelled by the user; regions are re-read after it because
-- the ripple edits move every marker/region. All edits sit in one undo block,
-- and phases 2-4 run inside a PreventUIRefresh bracket (phase yields
-- temporarily release it where needed).
local function do_work()
local dialog_regions = get_regions_by_type("^DIALOG%s+%d+$")
if #dialog_regions == 0 then
reaper.ShowMessageBox("No DIALOG regions found.", "Post-Production", 0)
return
end
reaper.Undo_BeginBlock()
-- Phase 1: Strip silence (analysis yields for progress, removal uses PreventUIRefresh)
local ok, dialog_rms_db = phase1_strip_silence(dialog_regions)
if not ok then
reaper.Undo_EndBlock("Post-production: cancelled", -1)
log("Cancelled.")
return
end
-- Re-read regions after ripple edits
dialog_regions = get_regions_by_type("^DIALOG%s+%d+$")
local ad_regions = get_regions_by_type("^AD%s+%d+$")
local ident_regions = get_regions_by_type("^IDENT%s+%d+$")
-- Combined, sorted AD+IDENT list for the music ducking in phase 4
local ad_ident_regions = {}
for _, r in ipairs(ad_regions) do table.insert(ad_ident_regions, r) end
for _, r in ipairs(ident_regions) do table.insert(ad_ident_regions, r) end
table.sort(ad_ident_regions, function(a, b) return a.start_pos < b.start_pos end)
reaper.PreventUIRefresh(1)
-- Phase 2: Normalize
if #ad_regions > 0 or #ident_regions > 0 then
phase2_normalize(dialog_regions, ad_regions, ident_regions, dialog_rms_db)
else
log("Phase 2: No AD/IDENT regions found — skipping")
end
-- Phase 3: Trim music
phase3_trim_music()
-- Phase 4: Music fades
if #ad_ident_regions > 0 then
phase4_music_fades(ad_ident_regions)
else
log("Phase 4: No AD/IDENT regions found — skipping")
end
reaper.PreventUIRefresh(-1)
reaper.Undo_EndBlock("Post-production: strip silence + music fades", -1)
reaper.UpdateArrange()
log("All phases complete!")
end
-- Coroutine runner with progress window
-- Coroutine runner: pumps do_work() from reaper.defer so the UI stays alive.
local work_co

--- One pump iteration: repaint progress, resume the worker, reschedule.
-- On worker error, closes the UI, rebalances PreventUIRefresh, and ends the
-- undo block; on completion, draws the final "Done!" frame and closes.
local function work_loop()
  if not work_co or coroutine.status(work_co) == "dead" then
    progress_phase = "Done!"
    progress_pct = 1.0
    progress_detail = ""
    progress_draw()
    progress_close()
    return
  end
  progress_draw()
  local ok, err = coroutine.resume(work_co)
  if not ok then
    progress_close()
    log("ERROR: " .. tostring(err))
    reaper.PreventUIRefresh(-1)
    reaper.Undo_EndBlock("Post-production: error", -1)
    return
  end
  if coroutine.status(work_co) == "dead" then
    progress_phase = "Done!"
    progress_pct = 1.0
    progress_detail = ""
    progress_draw()
    progress_close()
  else
    reaper.defer(work_loop)
  end
end

progress_init()
work_co = coroutine.create(do_work)
reaper.defer(work_loop)

View File

@@ -1,4 +1,22 @@
[
{
"title": "Intern Pitches Himself Live On Air",
"description": "This intern used his first day on the job to shoot his shot with the entire radio audience. The therapy line is sending me.",
"episode_number": 36,
"clip_file": "clip-1-intern-pitches-himself-live-on-air.mp4",
"youtube_id": "exO3_9ewKH0",
"featured": false,
"thumbnail": "images/clips/clip-1-intern-pitches-himself-live-on-air.jpg"
},
{
"title": "Wait Until She Dies or Kill Her",
"description": "Luke gives the most UNHINGED inheritance advice I've ever heard on live radio. This escalated so fast.",
"episode_number": 35,
"clip_file": "clip-1-wait-until-she-dies-or-kill-her.mp4",
"youtube_id": "03oJoRh-ioo",
"featured": false,
"thumbnail": "images/clips/clip-1-wait-until-she-dies-or-kill-her.jpg"
},
{
"title": "Nobody's Potato Salad Is Good",
"description": "Luke goes OFF on workplace potlucks: 'Nobody's potato salad is f***ing good, alright? Everything at a potluck is gross. Just take everybody to McDonald's.'",

View File

@@ -252,4 +252,10 @@
<changefreq>never</changefreq>
<priority>0.7</priority>
</url>
<url>
<loc>https://lukeattheroost.com/episode.html?slug=episode-36-late-night-confessions-and-unexpected-moments</loc>
<lastmod>2026-03-14</lastmod>
<changefreq>never</changefreq>
<priority>0.7</priority>
</url>
</urlset>

File diff suppressed because one or more lines are too long