Clip pipeline improvements, direct YouTube upload, hero redesign, how-it-works updates

- make_clips: migrate refine_clip_timestamps to mlx-whisper, add LLM caption
  polishing, fix speaker label reversal in grouped caption lines
- upload_clips: interactive episode/clip/platform menus, direct YouTube Shorts
  upload via Data API v3 (bypasses Postiz), direct Bluesky upload
- Website hero: centered layout with left-column cover art on desktop, compact
  text links instead of pill buttons, scaled up typography
- How-it-works: move anatomy section above diagram, update stats (320 names,
  189+ personality layers, 20 towns, 570+ topics, 1400+ scenarios), add
  drunk/high/unhinged callers, voicemails, MLX Whisper GPU, LLM-polished captions
- All footers: add System Status link, remove Ko-fi branding
- .gitignore: YouTube OAuth credential files

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-16 04:06:23 -07:00
parent 3164a70e48
commit f0271e61df
9 changed files with 591 additions and 266 deletions

4
.gitignore vendored
View File

@@ -50,5 +50,9 @@ voices-v1.0.bin
# Reference voices for TTS # Reference voices for TTS
ref_audio/ ref_audio/
# YouTube OAuth credentials
youtube_client_secrets.json
youtube_token.json
# Claude settings (local) # Claude settings (local)
.claude/ .claude/

View File

@@ -31,8 +31,8 @@ load_dotenv(Path(__file__).parent / ".env")
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY") OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
RSS_FEED_URL = "https://podcast.macneilmediagroup.com/@LukeAtTheRoost/feed.xml" RSS_FEED_URL = "https://podcast.macneilmediagroup.com/@LukeAtTheRoost/feed.xml"
EPISODE_CACHE_DIR = Path(__file__).parent / "clips" / ".episode-cache" EPISODE_CACHE_DIR = Path(__file__).parent / "clips" / ".episode-cache"
WHISPER_MODEL_FAST = "base" WHISPER_MODEL_FAST = "distil-large-v3"
WHISPER_MODEL_QUALITY = "large-v3" WHISPER_MODEL_QUALITY = "distil-large-v3"
COVER_ART = Path(__file__).parent / "website" / "images" / "cover.png" COVER_ART = Path(__file__).parent / "website" / "images" / "cover.png"
# Fonts # Fonts
@@ -71,7 +71,7 @@ def _build_whisper_prompt(labeled_transcript: str) -> str:
def transcribe_with_timestamps(audio_path: str, whisper_model: str = None, def transcribe_with_timestamps(audio_path: str, whisper_model: str = None,
labeled_transcript: str = "") -> list[dict]: labeled_transcript: str = "") -> list[dict]:
"""Transcribe audio with word-level timestamps using faster-whisper. """Transcribe audio with word-level timestamps using mlx-whisper (Apple Silicon GPU).
Returns list of segments: [{start, end, text, words: [{word, start, end}]}] Returns list of segments: [{start, end, text, words: [{word, start, end}]}]
""" """
@@ -83,43 +83,51 @@ def transcribe_with_timestamps(audio_path: str, whisper_model: str = None,
return json.load(f) return json.load(f)
try: try:
from faster_whisper import WhisperModel import mlx_whisper
except ImportError: except ImportError:
print("Error: faster-whisper not installed. Run: pip install faster-whisper") print("Error: mlx-whisper not installed. Run: pip install mlx-whisper")
sys.exit(1) sys.exit(1)
MODEL_HF_REPOS = {
"distil-large-v3": "mlx-community/distil-whisper-large-v3",
"large-v3": "mlx-community/whisper-large-v3-mlx",
"medium": "mlx-community/whisper-medium-mlx",
"small": "mlx-community/whisper-small-mlx",
"base": "mlx-community/whisper-base-mlx",
}
hf_repo = MODEL_HF_REPOS.get(model_name, f"mlx-community/whisper-{model_name}-mlx")
initial_prompt = _build_whisper_prompt(labeled_transcript) initial_prompt = _build_whisper_prompt(labeled_transcript)
print(f" Model: {model_name}") print(f" Model: {model_name} (MLX GPU)")
if labeled_transcript: if labeled_transcript:
print(f" Prompt: {initial_prompt[:100]}...") print(f" Prompt: {initial_prompt[:100]}...")
model = WhisperModel(model_name, compute_type="float32")
segments_iter, info = model.transcribe( result = mlx_whisper.transcribe(
audio_path, audio_path,
path_or_hf_repo=hf_repo,
language="en",
word_timestamps=True, word_timestamps=True,
initial_prompt=initial_prompt, initial_prompt=initial_prompt,
language="en",
beam_size=5,
vad_filter=True,
) )
segments = [] segments = []
for seg in segments_iter: for seg in result.get("segments", []):
words = [] words = []
if seg.words: for w in seg.get("words", []):
for w in seg.words: words.append({
words.append({ "word": w["word"].strip(),
"word": w.word.strip(), "start": round(w["start"], 3),
"start": round(w.start, 3), "end": round(w["end"], 3),
"end": round(w.end, 3), })
})
segments.append({ segments.append({
"start": round(seg.start, 3), "start": round(seg["start"], 3),
"end": round(seg.end, 3), "end": round(seg["end"], 3),
"text": seg.text.strip(), "text": seg["text"].strip(),
"words": words, "words": words,
}) })
print(f" Transcribed {info.duration:.1f}s ({len(segments)} segments)") duration = segments[-1]["end"] if segments else 0
print(f" Transcribed {duration:.1f}s ({len(segments)} segments)")
with open(cache_path, "w") as f: with open(cache_path, "w") as f:
json.dump(segments, f) json.dump(segments, f)
@@ -131,33 +139,39 @@ def transcribe_with_timestamps(audio_path: str, whisper_model: str = None,
def refine_clip_timestamps(audio_path: str, clips: list[dict], def refine_clip_timestamps(audio_path: str, clips: list[dict],
quality_model: str, labeled_transcript: str = "", quality_model: str, labeled_transcript: str = "",
) -> dict[int, list[dict]]: ) -> dict[int, list[dict]]:
"""Re-transcribe just the selected clip ranges with a high-quality model. """Re-transcribe just the selected clip ranges with mlx-whisper (GPU).
Extracts each clip segment, runs the quality model on it, and returns Extracts each clip segment, runs the quality model on it, and returns
refined segments with timestamps mapped back to the original timeline. refined segments with word-level timestamps mapped back to the original timeline.
Returns: {clip_index: [segments]} keyed by clip index Returns: {clip_index: [segments]} keyed by clip index
""" """
try: try:
from faster_whisper import WhisperModel import mlx_whisper
except ImportError: except ImportError:
print("Error: faster-whisper not installed. Run: pip install faster-whisper") print("Error: mlx-whisper not installed. Run: pip install mlx-whisper")
sys.exit(1) sys.exit(1)
initial_prompt = _build_whisper_prompt(labeled_transcript) MODEL_HF_REPOS = {
print(f" Refinement model: {quality_model}") "distil-large-v3": "mlx-community/distil-whisper-large-v3",
"large-v3": "mlx-community/whisper-large-v3-mlx",
"medium": "mlx-community/whisper-medium-mlx",
"small": "mlx-community/whisper-small-mlx",
"base": "mlx-community/whisper-base-mlx",
}
hf_repo = MODEL_HF_REPOS.get(quality_model, f"mlx-community/whisper-{quality_model}-mlx")
model = None # Lazy-load so we skip if all cached print(f" Refinement model: {quality_model} (MLX GPU)")
initial_prompt = _build_whisper_prompt(labeled_transcript)
refined = {} refined = {}
with tempfile.TemporaryDirectory() as tmp: with tempfile.TemporaryDirectory() as tmp:
for i, clip in enumerate(clips): for i, clip in enumerate(clips):
# Add padding around clip for context (Whisper does better with some lead-in)
pad = 3.0 pad = 3.0
seg_start = max(0, clip["start_time"] - pad) seg_start = max(0, clip["start_time"] - pad)
seg_end = clip["end_time"] + pad seg_end = clip["end_time"] + pad
# Check cache first
cache_key = f"{Path(audio_path).stem}_clip{i}_{seg_start:.1f}-{seg_end:.1f}" cache_key = f"{Path(audio_path).stem}_clip{i}_{seg_start:.1f}-{seg_end:.1f}"
cache_path = Path(audio_path).parent / f".whisper_refine_{quality_model}_{cache_key}.json" cache_path = Path(audio_path).parent / f".whisper_refine_{quality_model}_{cache_key}.json"
if cache_path.exists(): if cache_path.exists():
@@ -166,7 +180,6 @@ def refine_clip_timestamps(audio_path: str, clips: list[dict],
refined[i] = json.load(f) refined[i] = json.load(f)
continue continue
# Extract clip segment to temp WAV
seg_path = os.path.join(tmp, f"segment_{i}.wav") seg_path = os.path.join(tmp, f"segment_{i}.wav")
cmd = [ cmd = [
"ffmpeg", "-y", "-ss", str(seg_start), "-t", str(seg_end - seg_start), "ffmpeg", "-y", "-ss", str(seg_start), "-t", str(seg_end - seg_start),
@@ -178,39 +191,35 @@ def refine_clip_timestamps(audio_path: str, clips: list[dict],
refined[i] = [] refined[i] = []
continue continue
# Lazy-load model on first non-cached clip mlx_result = mlx_whisper.transcribe(
if model is None:
model = WhisperModel(quality_model, compute_type="float32")
segments_iter, info = model.transcribe(
seg_path, seg_path,
path_or_hf_repo=hf_repo,
language="en",
word_timestamps=True, word_timestamps=True,
initial_prompt=initial_prompt, initial_prompt=initial_prompt,
language="en",
beam_size=5,
vad_filter=True,
) )
# Collect segments and offset timestamps back to original timeline
segments = [] segments = []
for seg in segments_iter: for seg_data in mlx_result.get("segments", []):
text = seg_data["text"].strip()
words = [] words = []
if seg.words: for w in seg_data.get("words", []):
for w in seg.words: words.append({
words.append({ "word": w["word"].strip(),
"word": w.word.strip(), "start": round(w["start"] + seg_start, 3),
"start": round(w.start + seg_start, 3), "end": round(w["end"] + seg_start, 3),
"end": round(w.end + seg_start, 3), })
})
segments.append({ segments.append({
"start": round(seg.start + seg_start, 3), "start": round(seg_data["start"] + seg_start, 3),
"end": round(seg.end + seg_start, 3), "end": round(seg_data["end"] + seg_start, 3),
"text": seg.text.strip(), "text": text,
"words": words, "words": words,
}) })
refined[i] = segments refined[i] = segments
print(f" Clip {i+1}: Refined {info.duration:.1f}s → {len(segments)} segments") seg_duration = segments[-1]["end"] - segments[0]["start"] if segments else 0
print(f" Clip {i+1}: Refined {seg_duration:.1f}s → {len(segments)} segments")
with open(cache_path, "w") as f: with open(cache_path, "w") as f:
json.dump(segments, f) json.dump(segments, f)
@@ -694,32 +703,116 @@ def _interpolate_speaker(idx: int, matched: dict, n_words: int) -> str | None:
return None return None
def polish_clip_words(words: list[dict], labeled_transcript: str = "") -> list[dict]:
    """Use LLM to fix punctuation, capitalization, and misheard words.

    Joins the raw whisper words into one excerpt, asks the LLM for a corrected
    version with the identical word count, and writes any changed words back
    onto the original timed entries. On any API failure or word-count mismatch
    the raw words are returned untouched.
    """
    # Nothing to polish, or no API key configured — pass straight through.
    if not words or not OPENROUTER_API_KEY:
        return words

    excerpt = " ".join(entry["word"] for entry in words)

    reference = ""
    if labeled_transcript:
        reference = f"\nFor reference, here's the speaker-labeled transcript of this section (use it to correct misheard words and names):\n{labeled_transcript[:3000]}\n"

    prompt = f"""Fix this podcast transcript excerpt so it reads as proper sentences. Fix punctuation, capitalization, and obvious misheard words.
RULES:
- Keep the EXACT same number of words in the EXACT same order
- Only change capitalization, punctuation attached to words, and obvious mishearings
- Do NOT add, remove, merge, or reorder words
- Contractions count as one word (don't = 1 word)
- Return ONLY the corrected text, nothing else
{reference}
RAW TEXT ({len(words)} words):
{excerpt}"""

    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "Content-Type": "application/json",
            },
            json={
                "model": "anthropic/claude-sonnet-4-5",
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 2048,
                "temperature": 0,
            },
            timeout=30,
        )
        if response.status_code != 200:
            print(f" Polish failed ({response.status_code}), using raw text")
            return words

        corrected = response.json()["choices"][0]["message"]["content"].strip()
        corrected_words = corrected.split()
        # Hard safety gate: a count mismatch means timestamps can no longer
        # be mapped 1:1, so discard the polish entirely.
        if len(corrected_words) != len(words):
            print(f" Polish word count mismatch ({len(corrected_words)} vs {len(words)}), using raw text")
            return words

        n_changed = 0
        for entry, new_word in zip(words, corrected_words):
            if entry["word"] != new_word:
                entry["word"] = new_word
                n_changed += 1
        if n_changed:
            print(f" Polished {n_changed} words")
    except Exception as e:
        # Best-effort feature: never let a polish failure break clip rendering.
        print(f" Polish error: {e}")
    return words
def group_words_into_lines(words: list[dict], clip_start: float, def group_words_into_lines(words: list[dict], clip_start: float,
clip_duration: float) -> list[dict]: clip_duration: float) -> list[dict]:
"""Group words into timed caption lines for rendering. """Group words into timed caption lines for rendering.
Splits at speaker changes so each line has a single, correct speaker label.
Returns list of: {start, end, speaker, words: [{word, highlighted}]} Returns list of: {start, end, speaker, words: [{word, highlighted}]}
""" """
if not words: if not words:
return [] return []
# Group words into display lines (5-7 words per line) # First split at speaker boundaries, then group into display lines
raw_lines = [] speaker_groups = []
current_line = [] current_group = []
current_speaker = words[0].get("speaker", "")
for w in words: for w in words:
current_line.append(w) speaker = w.get("speaker", "")
if len(current_line) >= 6 or w["word"].rstrip().endswith(('.', '?', '!', ',')): if speaker and speaker != current_speaker and current_group:
if len(current_line) >= 3: speaker_groups.append((current_speaker, current_group))
raw_lines.append(current_line) current_group = []
current_line = [] current_speaker = speaker
if current_line: current_group.append(w)
if raw_lines and len(current_line) < 3: if current_group:
raw_lines[-1].extend(current_line) speaker_groups.append((current_speaker, current_group))
else:
raw_lines.append(current_line) # Now group each speaker's words into display lines (5-7 words)
raw_lines = []
for speaker, group_words in speaker_groups:
current_line = []
for w in group_words:
current_line.append(w)
if len(current_line) >= 6 or w["word"].rstrip().endswith(('.', '?', '!', ',')):
if len(current_line) >= 3:
raw_lines.append((speaker, current_line))
current_line = []
if current_line:
if raw_lines and len(current_line) < 3 and raw_lines[-1][0] == speaker:
raw_lines[-1] = (speaker, raw_lines[-1][1] + current_line)
else:
raw_lines.append((speaker, current_line))
lines = [] lines = []
for line_words in raw_lines: for speaker, line_words in raw_lines:
line_start = line_words[0]["start"] - clip_start line_start = line_words[0]["start"] - clip_start
line_end = line_words[-1]["end"] - clip_start line_end = line_words[-1]["end"] - clip_start
@@ -733,7 +826,7 @@ def group_words_into_lines(words: list[dict], clip_start: float,
lines.append({ lines.append({
"start": line_start, "start": line_start,
"end": line_end, "end": line_end,
"speaker": line_words[0].get("speaker", ""), "speaker": speaker,
"words": line_words, "words": line_words,
}) })
@@ -1334,6 +1427,9 @@ def main():
clip["start_time"], clip["end_time"], clip["start_time"], clip["end_time"],
word_source) word_source)
# Polish text with LLM (fix punctuation, capitalization, mishearings)
clip_words = polish_clip_words(clip_words, labeled_transcript)
# Group words into timed caption lines # Group words into timed caption lines
caption_lines = group_words_into_lines( caption_lines = group_words_into_lines(
clip_words, clip["start_time"], duration clip_words, clip["start_time"], duration

238
upload_clips.py Normal file → Executable file
View File

@@ -1,12 +1,11 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
"""Upload podcast clips to social media via Postiz (and direct Bluesky via atproto). """Upload podcast clips to social media (direct YouTube & Bluesky, Postiz for others).
Usage: Usage:
python upload_clips.py clips/episode-12/ python upload_clips.py # interactive: pick episode, clips, platforms
python upload_clips.py clips/episode-12/ --clip 1 python upload_clips.py clips/episode-12/ # pick clips and platforms interactively
python upload_clips.py clips/episode-12/ --platforms ig,yt python upload_clips.py clips/episode-12/ --clip 1 --platforms ig,yt
python upload_clips.py clips/episode-12/ --schedule "2026-02-16T10:00:00" python upload_clips.py clips/episode-12/ --yes # skip all prompts, upload everything
python upload_clips.py clips/episode-12/ --yes # skip confirmation
""" """
import argparse import argparse
@@ -27,6 +26,9 @@ POSTIZ_URL = os.getenv("POSTIZ_URL", "https://social.lukeattheroost.com")
BSKY_HANDLE = os.getenv("BSKY_HANDLE", "lukeattheroost.bsky.social") BSKY_HANDLE = os.getenv("BSKY_HANDLE", "lukeattheroost.bsky.social")
BSKY_APP_PASSWORD = os.getenv("BSKY_APP_PASSWORD") BSKY_APP_PASSWORD = os.getenv("BSKY_APP_PASSWORD")
YT_CLIENT_SECRETS = Path(__file__).parent / "youtube_client_secrets.json"
YT_TOKEN_FILE = Path(__file__).parent / "youtube_token.json"
PLATFORM_ALIASES = { PLATFORM_ALIASES = {
"ig": "instagram", "insta": "instagram", "instagram": "instagram", "ig": "instagram", "insta": "instagram", "instagram": "instagram",
"yt": "youtube", "youtube": "youtube", "yt": "youtube", "youtube": "youtube",
@@ -214,6 +216,106 @@ def post_to_bluesky(clip: dict, clip_file: Path) -> bool:
return True return True
def get_youtube_service():
    """Authenticate with YouTube API. First run opens a browser, then reuses saved token.

    Returns an authenticated googleapiclient "youtube" v3 service object, or
    None when no saved token exists and the client-secrets file needed for the
    initial OAuth consent flow is missing.
    """
    # Function-local imports: the google-* packages are only required when a
    # YouTube upload is actually attempted.
    from google.oauth2.credentials import Credentials
    from google_auth_oauthlib.flow import InstalledAppFlow
    from google.auth.transport.requests import Request
    from googleapiclient.discovery import build as yt_build
    # Upload-only scope — no read access to the channel is requested.
    scopes = ["https://www.googleapis.com/auth/youtube.upload"]
    creds = None
    if YT_TOKEN_FILE.exists():
        creds = Credentials.from_authorized_user_file(str(YT_TOKEN_FILE), scopes)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Saved token expired but is refreshable — renew without a browser.
            creds.refresh(Request())
        else:
            # No usable token: run the interactive OAuth consent flow.
            if not YT_CLIENT_SECRETS.exists():
                print(" Error: youtube_client_secrets.json not found")
                print(" Download OAuth2 Desktop App credentials from Google Cloud Console")
                return None
            flow = InstalledAppFlow.from_client_secrets_file(str(YT_CLIENT_SECRETS), scopes)
            # Opens the default browser; a local server on port 8090 receives
            # the OAuth redirect.
            creds = flow.run_local_server(port=8090)
        # Persist the new/refreshed token so future runs skip the flow.
        # NOTE(review): indentation reconstructed from the diff rendering —
        # confirm this write sits after BOTH the refresh and consent branches.
        with open(YT_TOKEN_FILE, "w") as f:
            f.write(creds.to_json())
    return yt_build("youtube", "v3", credentials=creds)
def post_to_youtube(clip: dict, clip_file: Path) -> bool:
    """Upload a clip directly to YouTube Shorts via the Data API.

    Args:
        clip: Clip metadata dict; reads "title" and optional "hashtags".
        clip_file: Path to the rendered MP4 file to upload.

    Returns:
        True on successful upload, False on auth failure or API error.
    """
    import time
    import random
    from googleapiclient.http import MediaFileUpload
    from googleapiclient.errors import HttpError
    youtube = get_youtube_service()
    if not youtube:
        return False
    # Ensure "#Shorts" appears in both title and description so YouTube
    # surfaces the video in the Shorts feed.
    title = clip["title"]
    if "#Shorts" not in title:
        title = f"{title} #Shorts"
    description = build_content(clip, "youtube")
    if "#Shorts" not in description:
        description += "\n\n#Shorts"
    # Tags must not carry the leading "#".
    tags = [h.lstrip("#") for h in clip.get("hashtags", [])]
    if "Shorts" not in tags:
        tags.insert(0, "Shorts")
    body = {
        "snippet": {
            "title": title[:100],  # YouTube caps titles at 100 characters
            "description": description,
            "tags": tags,
            "categoryId": "24",  # Entertainment
        },
        "status": {
            "privacyStatus": "public",
            "selfDeclaredMadeForKids": False,
        },
    }
    # Resumable upload in 256 KB chunks so per-chunk progress can be printed.
    media = MediaFileUpload(
        str(clip_file),
        mimetype="video/mp4",
        chunksize=256 * 1024,
        resumable=True,
    )
    request = youtube.videos().insert(part="snippet,status", body=body, media_body=media)
    file_size = clip_file.stat().st_size / 1_000_000
    print(f" Uploading video ({file_size:.1f} MB)...")
    response = None
    retry = 0
    while response is None:
        try:
            status, response = request.next_chunk()
            if status:
                print(f" Upload {int(status.progress() * 100)}%...")
        except HttpError as e:
            # Transient server errors: retry up to 5 times with jittered
            # exponential backoff. Any other HTTP error aborts the upload.
            if e.resp.status in (500, 502, 503, 504) and retry < 5:
                retry += 1
                wait = random.random() * (2 ** retry)
                print(f" Retrying in {wait:.1f}s...")
                time.sleep(wait)
            else:
                print(f" YouTube API error: {e}")
                return False
    video_id = response["id"]
    print(f" https://youtube.com/shorts/{video_id}")
    return True
def create_post(integration_id: str, content: str, media: dict, def create_post(integration_id: str, content: str, media: dict,
settings: dict, schedule: str | None = None) -> dict: settings: dict, schedule: str | None = None) -> dict:
from datetime import datetime, timezone from datetime import datetime, timezone
@@ -253,7 +355,7 @@ def create_post(integration_id: str, content: str, media: dict,
def main(): def main():
valid_names = sorted(set(PLATFORM_ALIASES.keys())) valid_names = sorted(set(PLATFORM_ALIASES.keys()))
parser = argparse.ArgumentParser(description="Upload podcast clips to social media via Postiz") parser = argparse.ArgumentParser(description="Upload podcast clips to social media via Postiz")
parser.add_argument("clips_dir", help="Path to clips directory (e.g. clips/episode-12/)") parser.add_argument("clips_dir", nargs="?", help="Path to clips directory (e.g. clips/episode-12/). If omitted, shows a picker.")
parser.add_argument("--clip", "-c", type=int, help="Upload only clip N (1-indexed)") parser.add_argument("--clip", "-c", type=int, help="Upload only clip N (1-indexed)")
parser.add_argument("--platforms", "-p", parser.add_argument("--platforms", "-p",
help=f"Comma-separated platforms ({','.join(ALL_PLATFORMS)}). Default: all") help=f"Comma-separated platforms ({','.join(ALL_PLATFORMS)}). Default: all")
@@ -266,6 +368,75 @@ def main():
print("Error: POSTIZ_API_KEY not set in .env") print("Error: POSTIZ_API_KEY not set in .env")
sys.exit(1) sys.exit(1)
# Resolve clips directory — pick interactively if not provided
if args.clips_dir:
clips_dir = Path(args.clips_dir).expanduser().resolve()
else:
clips_root = Path(__file__).parent / "clips"
episode_dirs = sorted(
[d for d in clips_root.iterdir()
if d.is_dir() and not d.name.startswith(".") and (d / "clips-metadata.json").exists()],
key=lambda d: d.name,
)
if not episode_dirs:
print("No clip directories found in clips/. Run make_clips.py first.")
sys.exit(1)
print("\nAvailable episodes:\n")
for i, d in enumerate(episode_dirs):
with open(d / "clips-metadata.json") as f:
meta = json.load(f)
print(f" {i+1}. {d.name} ({len(meta)} clip{'s' if len(meta) != 1 else ''})")
print()
while True:
try:
choice = input("Which episode? ").strip()
idx = int(choice) - 1
if 0 <= idx < len(episode_dirs):
clips_dir = episode_dirs[idx]
break
print(f" Enter 1-{len(episode_dirs)}")
except (ValueError, EOFError):
print(f" Enter an episode number")
metadata_path = clips_dir / "clips-metadata.json"
if not metadata_path.exists():
print(f"Error: No clips-metadata.json found in {clips_dir}")
print("Run make_clips.py first to generate clips and metadata.")
sys.exit(1)
with open(metadata_path) as f:
clips = json.load(f)
# Pick clips
if args.clip:
if args.clip < 1 or args.clip > len(clips):
print(f"Error: Clip {args.clip} not found (have {len(clips)} clips)")
sys.exit(1)
clips = [clips[args.clip - 1]]
elif not args.yes:
print(f"\nFound {len(clips)} clip(s):\n")
for i, clip in enumerate(clips):
desc = clip.get('description', clip.get('caption_text', ''))
if len(desc) > 70:
desc = desc[:desc.rfind(' ', 0, 70)] + '...'
print(f" {i+1}. \"{clip['title']}\" ({clip['duration']:.0f}s)")
print(f" {desc}")
print(f"\n a. All clips")
print()
while True:
choice = input("Which clips? (e.g. 1,3 or a for all): ").strip().lower()
if choice in ('a', 'all'):
break
try:
indices = [int(x.strip()) for x in choice.split(",")]
if all(1 <= x <= len(clips) for x in indices):
clips = [clips[x - 1] for x in indices]
break
print(f" Invalid selection. Enter 1-{len(clips)}, comma-separated, or 'a' for all.")
except (ValueError, EOFError):
print(f" Enter clip numbers (e.g. 1,3) or 'a' for all")
# Pick platforms
if args.platforms: if args.platforms:
requested = [] requested = []
for p in args.platforms.split(","): for p in args.platforms.split(","):
@@ -276,28 +447,29 @@ def main():
sys.exit(1) sys.exit(1)
requested.append(PLATFORM_ALIASES[p]) requested.append(PLATFORM_ALIASES[p])
target_platforms = list(dict.fromkeys(requested)) target_platforms = list(dict.fromkeys(requested))
elif not args.yes:
print(f"\nPlatforms:\n")
for i, p in enumerate(ALL_PLATFORMS):
print(f" {i+1}. {PLATFORM_DISPLAY[p]}")
print(f"\n a. All platforms (default)")
print()
choice = input("Which platforms? (e.g. 1,3,5 or a for all) [a]: ").strip().lower()
if choice and choice not in ('a', 'all'):
try:
indices = [int(x.strip()) for x in choice.split(",")]
target_platforms = [ALL_PLATFORMS[x - 1] for x in indices if 1 <= x <= len(ALL_PLATFORMS)]
if not target_platforms:
target_platforms = ALL_PLATFORMS[:]
except (ValueError, IndexError):
target_platforms = ALL_PLATFORMS[:]
else:
target_platforms = ALL_PLATFORMS[:]
else: else:
target_platforms = ALL_PLATFORMS[:] target_platforms = ALL_PLATFORMS[:]
clips_dir = Path(args.clips_dir).expanduser().resolve() DIRECT_PLATFORMS = {"bluesky", "youtube"}
metadata_path = clips_dir / "clips-metadata.json"
if not metadata_path.exists():
print(f"Error: No clips-metadata.json found in {clips_dir}")
print("Run make_clips.py first to generate clips and metadata.")
sys.exit(1)
with open(metadata_path) as f:
clips = json.load(f)
if args.clip:
if args.clip < 1 or args.clip > len(clips):
print(f"Error: Clip {args.clip} not found (have {len(clips)} clips)")
sys.exit(1)
clips = [clips[args.clip - 1]]
needs_postiz = not args.dry_run and any( needs_postiz = not args.dry_run and any(
p != "bluesky" for p in target_platforms) p not in DIRECT_PLATFORMS for p in target_platforms)
if needs_postiz: if needs_postiz:
print("Fetching connected accounts from Postiz...") print("Fetching connected accounts from Postiz...")
integrations = fetch_integrations() integrations = fetch_integrations()
@@ -312,6 +484,12 @@ def main():
else: else:
print("Warning: BSKY_APP_PASSWORD not set in .env, skipping Bluesky") print("Warning: BSKY_APP_PASSWORD not set in .env, skipping Bluesky")
continue continue
if platform == "youtube":
if YT_CLIENT_SECRETS.exists() or YT_TOKEN_FILE.exists() or args.dry_run:
active_platforms[platform] = {"name": "YouTube Shorts", "_direct": True}
else:
print("Warning: youtube_client_secrets.json not found, skipping YouTube")
continue
if args.dry_run: if args.dry_run:
active_platforms[platform] = {"name": PLATFORM_DISPLAY[platform]} active_platforms[platform] = {"name": PLATFORM_DISPLAY[platform]}
continue continue
@@ -384,6 +562,16 @@ def main():
else: else:
print(f" {display}: Failed") print(f" {display}: Failed")
if "youtube" in active_platforms:
print(f" Posting to YouTube Shorts (direct)...")
try:
if post_to_youtube(clip, clip_file):
print(f" YouTube: Posted!")
else:
print(f" YouTube: Failed")
except Exception as e:
print(f" YouTube: Failed — {e}")
if "bluesky" in active_platforms: if "bluesky" in active_platforms:
print(f" Posting to Bluesky (direct)...") print(f" Posting to Bluesky (direct)...")
try: try:

View File

@@ -47,7 +47,7 @@ a:hover {
/* Hero */ /* Hero */
.hero { .hero {
padding: 3rem 1.5rem 2rem; padding: 3rem 1.5rem 2.5rem;
max-width: 900px; max-width: 900px;
margin: 0 auto; margin: 0 auto;
text-align: center; text-align: center;
@@ -57,14 +57,14 @@ a:hover {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
align-items: center; align-items: center;
gap: 2rem; gap: 1.5rem;
} }
.cover-art { .cover-art {
width: 220px; width: 260px;
height: 220px; height: 260px;
border-radius: var(--radius); border-radius: var(--radius);
box-shadow: 0 8px 32px rgba(232, 121, 29, 0.35); box-shadow: 0 8px 32px rgba(232, 121, 29, 0.25);
object-fit: cover; object-fit: cover;
} }
@@ -72,31 +72,32 @@ a:hover {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
align-items: center; align-items: center;
gap: 0.75rem; gap: 0.5rem;
} }
.hero h1 { .hero h1 {
font-size: 2.5rem; font-size: 2.8rem;
font-weight: 800; font-weight: 800;
letter-spacing: -0.02em; letter-spacing: -0.02em;
} }
.tagline { .tagline {
font-size: 1.15rem; font-size: 1.2rem;
color: var(--text-muted); color: var(--text-muted);
max-width: 400px; max-width: 500px;
line-height: 1.5;
} }
.phone { .phone {
display: flex; display: flex;
align-items: center; align-items: center;
justify-content: center; justify-content: center;
gap: 0.6rem; gap: 0.5rem;
margin-top: 0.5rem; margin-top: 0.25rem;
} }
.phone-inline { .phone-inline {
font-size: 0.95rem; font-size: 1rem;
color: var(--text-muted); color: var(--text-muted);
} }
@@ -110,12 +111,12 @@ a:hover {
.on-air-badge { .on-air-badge {
display: none; display: none;
align-items: center; align-items: center;
gap: 0.4rem; gap: 0.35rem;
background: var(--accent-red); background: var(--accent-red);
color: #fff; color: #fff;
padding: 0.25rem 0.75rem; padding: 0.2rem 0.6rem;
border-radius: 50px; border-radius: 50px;
font-size: 0.7rem; font-size: 0.65rem;
font-weight: 800; font-weight: 800;
letter-spacing: 0.12em; letter-spacing: 0.12em;
text-transform: uppercase; text-transform: uppercase;
@@ -128,8 +129,8 @@ a:hover {
} }
.on-air-dot { .on-air-dot {
width: 8px; width: 7px;
height: 8px; height: 7px;
border-radius: 50%; border-radius: 50%;
background: #fff; background: #fff;
animation: on-air-blink 1s step-end infinite; animation: on-air-blink 1s step-end infinite;
@@ -149,11 +150,11 @@ a:hover {
.off-air-badge { .off-air-badge {
display: inline-flex; display: inline-flex;
align-items: center; align-items: center;
background: #444; background: rgba(255, 255, 255, 0.08);
color: var(--text-muted); color: var(--text-muted);
padding: 0.25rem 0.75rem; padding: 0.2rem 0.6rem;
border-radius: 50px; border-radius: 50px;
font-size: 0.7rem; font-size: 0.65rem;
font-weight: 700; font-weight: 700;
letter-spacing: 0.1em; letter-spacing: 0.1em;
text-transform: uppercase; text-transform: uppercase;
@@ -169,52 +170,58 @@ a:hover {
text-shadow: 0 0 16px rgba(204, 34, 34, 0.35); text-shadow: 0 0 16px rgba(204, 34, 34, 0.35);
} }
/* Subscribe buttons — primary listen platforms */ /* Subscribe — compact inline text links */
.subscribe-row { .subscribe-row {
display: flex; display: flex;
flex-direction: column;
align-items: center; align-items: center;
gap: 0.6rem; justify-content: center;
margin-top: 1.5rem; gap: 0.5rem;
margin-top: 1rem;
} }
.subscribe-label { .subscribe-label {
font-size: 0.75rem; font-size: 0.8rem;
color: var(--text-muted); color: var(--text-muted);
text-transform: uppercase; text-transform: uppercase;
letter-spacing: 0.15em; letter-spacing: 0.12em;
opacity: 0.6;
} }
.subscribe-buttons { .subscribe-buttons {
display: flex; display: flex;
flex-wrap: wrap; flex-wrap: wrap;
justify-content: center; justify-content: center;
gap: 0.5rem; align-items: center;
gap: 0.15rem;
} }
.subscribe-btn { .subscribe-btn {
display: inline-flex; display: inline-flex;
align-items: center; align-items: center;
gap: 0.4rem; gap: 0.35rem;
padding: 0.45rem 1rem; padding: 0.4rem 0.75rem;
border-radius: 50px; border-radius: 6px;
font-size: 0.8rem; font-size: 0.9rem;
font-weight: 600; font-weight: 600;
color: var(--text); color: var(--text-muted);
background: transparent; background: transparent;
border: 1px solid var(--text-muted); border: none;
transition: border-color 0.2s, color 0.2s; transition: color 0.2s;
} }
.subscribe-btn:hover { .subscribe-btn:hover {
border-color: var(--accent);
color: var(--accent); color: var(--accent);
} }
.subscribe-btn svg { .subscribe-btn svg {
width: 14px; width: 15px;
height: 14px; height: 15px;
flex-shrink: 0; flex-shrink: 0;
opacity: 0.6;
}
.subscribe-btn:hover svg {
opacity: 1;
} }
/* Secondary links — How It Works, Discord, Support */ /* Secondary links — How It Works, Discord, Support */
@@ -224,23 +231,35 @@ a:hover {
justify-content: center; justify-content: center;
align-items: center; align-items: center;
gap: 0.5rem; gap: 0.5rem;
margin-top: 0.75rem; margin-top: 0.25rem;
} }
.secondary-link { .secondary-link {
font-size: 0.8rem; font-size: 0.85rem;
color: var(--text-muted); color: var(--text-muted);
transition: color 0.2s; opacity: 0.6;
transition: color 0.2s, opacity 0.2s;
} }
.secondary-link:hover { .secondary-link:hover {
color: var(--accent); color: var(--accent);
opacity: 1;
} }
.secondary-sep { .secondary-sep {
color: var(--text-muted); color: var(--text-muted);
opacity: 0.4; opacity: 0.3;
font-size: 0.8rem; font-size: 0.85rem;
}
.support-link {
color: var(--accent);
opacity: 1;
font-weight: 600;
}
.support-link:hover {
color: var(--accent-hover);
} }
/* Episodes */ /* Episodes */
@@ -1201,13 +1220,15 @@ a:hover {
/* Desktop */ /* Desktop */
@media (min-width: 768px) { @media (min-width: 768px) {
.hero { .hero {
padding: 4rem 2rem 2.5rem; padding: 3.5rem 2rem 2.5rem;
max-width: 1000px;
} }
.hero-inner { .hero-inner {
flex-direction: row; flex-direction: row;
text-align: left; text-align: left;
gap: 3rem; gap: 2.5rem;
align-items: center;
} }
.hero-info { .hero-info {
@@ -1215,8 +1236,17 @@ a:hover {
} }
.cover-art { .cover-art {
width: 260px; width: 280px;
height: 260px; height: 280px;
flex-shrink: 0;
}
.hero h1 {
font-size: 2.8rem;
}
.phone {
justify-content: flex-start;
} }
.subscribe-row { .subscribe-row {

View File

@@ -109,9 +109,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a> <a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div> </div>
</div> </div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p> <p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p> <p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p> <p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer> </footer>
<!-- Sticky Audio Player --> <!-- Sticky Audio Player -->

View File

@@ -69,6 +69,109 @@
<p class="page-subtitle">Every caller on the show is a one-of-a-kind character — generated in real time by a custom-built AI system. Here's a peek behind the curtain.</p> <p class="page-subtitle">Every caller on the show is a one-of-a-kind character — generated in real time by a custom-built AI system. Here's a peek behind the curtain.</p>
</section> </section>
<!-- Steps -->
<section class="hiw-section">
<h2>The Anatomy of an AI Caller</h2>
<div class="hiw-steps">
<div class="hiw-step">
<div class="hiw-step-number">1</div>
<div class="hiw-step-content">
<h3>A Person Is Born</h3>
<p>Every caller starts as a blank slate. The system generates a complete identity: name, age, job, hometown, and personality. Each caller gets a unique speaking style — some ramble, some are blunt, some deflect with humor. They have relationships, vehicles, strong food opinions, nostalgic memories, and reasons for being up this late. They know what they were watching on TV, what errand they ran today, and what song was on the radio before they called.</p>
<p>Some callers become regulars. The system tracks returning callers across episodes — they remember past conversations, reference things they talked about before, and their stories evolve over time. You'll hear Carla update you on her divorce, or Carl check in about his gambling recovery. They're not reset between shows.</p>
<p>And some callers are drunk, high, or flat-out unhinged. They'll call with conspiracy theories about pigeons being government drones, existential crises about whether fish know they're wet, or to confess they accidentally set their kitchen on fire trying to make grilled cheese at 3 AM.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Unique Names</span>
<span class="hiw-detail-value">320</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Personality Layers</span>
<span class="hiw-detail-value">189+</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Towns with Real Knowledge</span>
<span class="hiw-detail-value">20</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Returning Regulars</span>
<span class="hiw-detail-value">12 callers</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">2</div>
<div class="hiw-step-content">
<h3>They Know Their World</h3>
<p>Callers know real facts about where they live — the restaurants, the highways, the local gossip. When a caller says they're from Lordsburg, they actually know about the Shakespeare ghost town and the drive to Deming. They know the current weather outside their window, what day of the week it is, whether it's monsoon season or chile harvest. They have strong opinions about where to get the best green chile and get nostalgic about how their town used to be. The system also pulls in real-time news so callers can reference things that actually happened today.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">3</div>
<div class="hiw-step-content">
<h3>They Have a Reason to Call</h3>
<p>Some callers have a problem — a fight with a neighbor, a situation at work, something weighing on them at 2 AM. Others call to geek out about Severance, argue about poker strategy, or share something they read about quantum physics. The system draws from over 570 discussion topics across dozens of categories and more than 1,400 life scenarios. Every caller has a purpose, not just a script.</p>
<div class="hiw-split-stat">
<div class="hiw-stat">
<span class="hiw-stat-number">70%</span>
<span class="hiw-stat-label">Need advice</span>
</div>
<div class="hiw-stat">
<span class="hiw-stat-number">30%</span>
<span class="hiw-stat-label">Want to talk about something</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">4</div>
<div class="hiw-step-content">
<h3>The Conversation Is Real</h3>
<p>Luke talks to each caller using push-to-talk, just like a real radio show. His voice is transcribed in real time, sent to an AI that responds in character, and then converted to speech using a voice engine — all in a few seconds. The AI doesn't just answer questions; it reacts, gets emotional, goes on tangents, and remembers what was said earlier in the show. Callers even react to previous callers — "Hey Luke, I heard that guy Tony earlier and I got to say, he's full of it." It makes the show feel like a living community, not isolated calls.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">5</div>
<div class="hiw-step-content">
<h3>Real Callers Call In Too</h3>
<p>When you dial 208-439-LUKE, your call goes into a live queue. Luke sees you waiting and can take your call right from the control room. Your voice streams in real time — no pre-recording, no delay. You're live on the show, talking to Luke, and the AI callers might even react to what you said. And if Luke isn't live, you can leave a voicemail — it gets transcribed and may get played on a future episode.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">6</div>
<div class="hiw-step-content">
<h3>The Control Room</h3>
<p>The entire show runs through a custom-built control panel. Luke manages callers, plays music and sound effects, runs ads, monitors the call queue, and controls everything from one screen. Audio is routed across multiple channels simultaneously — caller voices, music, sound effects, and live phone audio all on separate tracks. The website shows a live on-air indicator so listeners know when to call in.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Audio Channels</span>
<span class="hiw-detail-value">5 independent</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Caller Slots</span>
<span class="hiw-detail-value">10 per session</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Phone System</span>
<span class="hiw-detail-value">VoIP + WebSocket</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Live Status</span>
<span class="hiw-detail-value">Real-time CDN</span>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Overview --> <!-- Overview -->
<section class="hiw-section"> <section class="hiw-section">
<div class="hiw-card hiw-hero-card"> <div class="hiw-card hiw-hero-card">
@@ -211,6 +314,12 @@
</div> </div>
<span>Social Clips</span> <span>Social Clips</span>
</div> </div>
<div class="diagram-box">
<div class="diagram-icon">
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M22 12h-4l-3 9L9 3l-3 9H2"/></svg>
</div>
<span>Monitoring</span>
</div>
</div> </div>
<div class="diagram-arrow">&#8595;</div> <div class="diagram-arrow">&#8595;</div>
<!-- Row 7: Distribution --> <!-- Row 7: Distribution -->
@@ -281,108 +390,6 @@
</div> </div>
</section> </section>
<!-- Steps -->
<section class="hiw-section">
<h2>The Anatomy of an AI Caller</h2>
<div class="hiw-steps">
<div class="hiw-step">
<div class="hiw-step-number">1</div>
<div class="hiw-step-content">
<h3>A Person Is Born</h3>
<p>Every caller starts as a blank slate. The system generates a complete identity: name, age, job, hometown, and personality. Each caller gets a unique speaking style — some ramble, some are blunt, some deflect with humor. They have relationships, vehicles, strong food opinions, nostalgic memories, and reasons for being up this late. They know what they were watching on TV, what errand they ran today, and what song was on the radio before they called.</p>
<p>Some callers become regulars. The system tracks returning callers across episodes — they remember past conversations, reference things they talked about before, and their stories evolve over time. You'll hear Carla update you on her divorce, or Carl check in about his gambling recovery. They're not reset between shows.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Unique Names</span>
<span class="hiw-detail-value">160 names</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Personality Layers</span>
<span class="hiw-detail-value">30+</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Towns with Real Knowledge</span>
<span class="hiw-detail-value">32</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Returning Regulars</span>
<span class="hiw-detail-value">12+ callers</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">2</div>
<div class="hiw-step-content">
<h3>They Know Their World</h3>
<p>Callers know real facts about where they live — the restaurants, the highways, the local gossip. When a caller says they're from Lordsburg, they actually know about the Shakespeare ghost town and the drive to Deming. They know the current weather outside their window, what day of the week it is, whether it's monsoon season or chile harvest. They have strong opinions about where to get the best green chile and get nostalgic about how their town used to be. The system also pulls in real-time news so callers can reference things that actually happened today.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">3</div>
<div class="hiw-step-content">
<h3>They Have a Reason to Call</h3>
<p>Some callers have a problem — a fight with a neighbor, a situation at work, something weighing on them at 2 AM. Others call to geek out about Severance, argue about poker strategy, or share something they read about quantum physics. Every caller has a purpose, not just a script.</p>
<div class="hiw-split-stat">
<div class="hiw-stat">
<span class="hiw-stat-number">70%</span>
<span class="hiw-stat-label">Need advice</span>
</div>
<div class="hiw-stat">
<span class="hiw-stat-number">30%</span>
<span class="hiw-stat-label">Want to talk about something</span>
</div>
</div>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">4</div>
<div class="hiw-step-content">
<h3>The Conversation Is Real</h3>
<p>Luke talks to each caller using push-to-talk, just like a real radio show. His voice is transcribed in real time, sent to an AI that responds in character, and then converted to speech using a voice engine — all in a few seconds. The AI doesn't just answer questions; it reacts, gets emotional, goes on tangents, and remembers what was said earlier in the show. Callers even react to previous callers — "Hey Luke, I heard that guy Tony earlier and I got to say, he's full of it." It makes the show feel like a living community, not isolated calls.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">5</div>
<div class="hiw-step-content">
<h3>Real Callers Call In Too</h3>
<p>When you dial 208-439-LUKE, your call goes into a live queue. Luke sees you waiting and can take your call right from the control room. Your voice streams in real time — no pre-recording, no delay. You're live on the show, talking to Luke, and the AI callers might even react to what you said.</p>
</div>
</div>
<div class="hiw-step">
<div class="hiw-step-number">6</div>
<div class="hiw-step-content">
<h3>The Control Room</h3>
<p>The entire show runs through a custom-built control panel. Luke manages callers, plays music and sound effects, runs ads, monitors the call queue, and controls everything from one screen. Audio is routed across multiple channels simultaneously — caller voices, music, sound effects, and live phone audio all on separate tracks. The website shows a live on-air indicator so listeners know when to call in.</p>
<div class="hiw-detail-grid">
<div class="hiw-detail">
<span class="hiw-detail-label">Audio Channels</span>
<span class="hiw-detail-value">5 independent</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Caller Slots</span>
<span class="hiw-detail-value">10 per session</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Phone System</span>
<span class="hiw-detail-value">VoIP + WebSocket</span>
</div>
<div class="hiw-detail">
<span class="hiw-detail-label">Live Status</span>
<span class="hiw-detail-value">Real-time CDN</span>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Post-Production Pipeline --> <!-- Post-Production Pipeline -->
<section class="hiw-section"> <section class="hiw-section">
<h2>From Live Show to Podcast</h2> <h2>From Live Show to Podcast</h2>
@@ -444,11 +451,11 @@
<div class="hiw-step-number">9</div> <div class="hiw-step-number">9</div>
<div class="hiw-step-content"> <div class="hiw-step-content">
<h3>Automated Publishing</h3> <h3>Automated Publishing</h3>
<p>A single command takes a finished episode and handles everything: the audio is transcribed using speech recognition to generate full-text transcripts, then an LLM analyzes the transcript to write the episode title, description, and chapter markers with timestamps. The episode is uploaded to the podcast server, chapters and transcripts are attached to the metadata, and all media is synced to a global CDN so listeners everywhere get fast downloads.</p> <p>A single command takes a finished episode and handles everything: the audio is transcribed using MLX Whisper running on Apple Silicon GPU to generate full-text transcripts, then an LLM analyzes the transcript to write the episode title, description, and chapter markers with timestamps. The episode is uploaded to the podcast server, chapters and transcripts are attached to the metadata, and all media is synced to a global CDN so listeners everywhere get fast downloads.</p>
<div class="hiw-detail-grid"> <div class="hiw-detail-grid">
<div class="hiw-detail"> <div class="hiw-detail">
<span class="hiw-detail-label">Transcription</span> <span class="hiw-detail-label">Transcription</span>
<span class="hiw-detail-value">Whisper AI</span> <span class="hiw-detail-value">MLX Whisper (GPU)</span>
</div> </div>
<div class="hiw-detail"> <div class="hiw-detail">
<span class="hiw-detail-label">Metadata</span> <span class="hiw-detail-label">Metadata</span>
@@ -470,7 +477,7 @@
<div class="hiw-step-number">10</div> <div class="hiw-step-number">10</div>
<div class="hiw-step-content"> <div class="hiw-step-content">
<h3>Automated Social Clips</h3> <h3>Automated Social Clips</h3>
<p>No manual editing, no scheduling tools. After each episode, an LLM reads the full transcript and picks the best moments — funny exchanges, wild confessions, heated debates. Each clip is automatically extracted, captioned with word-level timing, and rendered as a vertical video with the show's branding. A second LLM pass writes platform-specific descriptions and hashtags. Then a single script blasts every clip to Instagram Reels, YouTube Shorts, Facebook, Bluesky, and Mastodon simultaneously — six platforms, zero manual work.</p> <p>No manual editing, no scheduling tools. After each episode, an LLM reads the full transcript and picks the best moments — funny exchanges, wild confessions, heated debates. Each clip is automatically extracted, transcribed with word-level timestamps, then polished by a second LLM pass that fixes punctuation, capitalization, and misheard words while preserving timing. The clips are rendered as vertical video with speaker-labeled captions and the show's branding. A third LLM writes platform-specific descriptions and hashtags. Then clips are uploaded directly to YouTube Shorts and Bluesky via their APIs, and pushed to Instagram Reels, Facebook, and Mastodon — six platforms, zero manual work.</p>
<div class="hiw-detail-grid"> <div class="hiw-detail-grid">
<div class="hiw-detail"> <div class="hiw-detail">
<span class="hiw-detail-label">Human Effort</span> <span class="hiw-detail-label">Human Effort</span>
@@ -482,7 +489,7 @@
</div> </div>
<div class="hiw-detail"> <div class="hiw-detail">
<span class="hiw-detail-label">Captions</span> <span class="hiw-detail-label">Captions</span>
<span class="hiw-detail-value">Word-level sync</span> <span class="hiw-detail-value">LLM-polished</span>
</div> </div>
<div class="hiw-detail"> <div class="hiw-detail">
<span class="hiw-detail-label">Simultaneous Push</span> <span class="hiw-detail-label">Simultaneous Push</span>
@@ -576,7 +583,7 @@
<div class="hiw-cta-phone"> <div class="hiw-cta-phone">
Or call in live: <strong>208-439-LUKE</strong> Or call in live: <strong>208-439-LUKE</strong>
</div> </div>
<a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="hiw-cta-support">Support the show on Ko-fi</a> <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="hiw-cta-support">Support the Show</a>
</section> </section>
<!-- Footer --> <!-- Footer -->
@@ -602,9 +609,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a> <a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div> </div>
</div> </div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p> <p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p> <p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p> <p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer> </footer>
</body> </body>

View File

@@ -117,12 +117,20 @@
<span class="secondary-sep">&middot;</span> <span class="secondary-sep">&middot;</span>
<a href="https://discord.gg/5CnQZxDM" target="_blank" rel="noopener" class="secondary-link">Discord</a> <a href="https://discord.gg/5CnQZxDM" target="_blank" rel="noopener" class="secondary-link">Discord</a>
<span class="secondary-sep">&middot;</span> <span class="secondary-sep">&middot;</span>
<a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="secondary-link">Support the Show</a> <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener" class="secondary-link support-link">Support the Show</a>
</div> </div>
</div> </div>
</div> </div>
</section> </section>
<!-- Episodes -->
<section class="episodes-section">
<h2>Episodes</h2>
<div class="episodes-list" id="episodes-list">
<div class="episodes-loading">Loading episodes...</div>
</div>
</section>
<!-- Testimonials --> <!-- Testimonials -->
<section class="testimonials-section"> <section class="testimonials-section">
<h2>What Callers Are Saying</h2> <h2>What Callers Are Saying</h2>
@@ -197,14 +205,6 @@
<div class="testimonials-dots" id="testimonials-dots"></div> <div class="testimonials-dots" id="testimonials-dots"></div>
</section> </section>
<!-- Episodes -->
<section class="episodes-section">
<h2>Episodes</h2>
<div class="episodes-list" id="episodes-list">
<div class="episodes-loading">Loading episodes...</div>
</div>
</section>
<!-- Footer --> <!-- Footer -->
<footer class="footer"> <footer class="footer">
<div class="footer-links"> <div class="footer-links">
@@ -228,9 +228,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a> <a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div> </div>
</div> </div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p> <p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p> <p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p> <p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer> </footer>
<!-- Sticky Audio Player --> <!-- Sticky Audio Player -->

View File

@@ -106,9 +106,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a> <a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div> </div>
</div> </div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p> <p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p> <p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p> <p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer> </footer>
</body> </body>

View File

@@ -78,9 +78,9 @@
<a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a> <a href="https://youtube.com/lukemacneil" target="_blank" rel="noopener">YouTube</a>
</div> </div>
</div> </div>
<p class="footer-contact">Support the show: <a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Ko-fi</a></p> <p class="footer-contact"><a href="https://ko-fi.com/lukemacneil" target="_blank" rel="noopener">Support the Show</a></p>
<p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p> <p class="footer-contact">Sales &amp; Collaboration: <a href="mailto:luke@macneilmediagroup.com">luke@macneilmediagroup.com</a></p>
<p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a></p> <p>&copy; 2026 Luke at the Roost &middot; <a href="/privacy">Privacy Policy</a> &middot; <a href="https://monitoring.macneilmediagroup.com/status/lukeattheroost" target="_blank" rel="noopener">System Status</a></p>
</footer> </footer>
<script> <script>