Skip to content

Instantly share code, notes, and snippets.

@motebaya
Created March 11, 2026 08:02
Show Gist options
  • Select an option

  • Save motebaya/17febcd3779a3e4a310b31093a6215cf to your computer and use it in GitHub Desktop.

Select an option

Save motebaya/17febcd3779a3e4a310b31093a6215cf to your computer and use it in GitHub Desktop.
procedurally generated youtube poop about freelance developer life, made entirely with python (pillow + numpy)
"""
YTP: "the_freelancer_experience.mp4"
A YouTube Poop expressing what it's like to be a freelance software engineer.
Generates all frames (PIL) and audio (numpy/wave), then calls ffmpeg to render.
No external media assets - everything is procedurally generated.
prompt:
can you use whatever resources you like, and python, to generate a short 'youtube poop' video and render it using ffmpeg ? can you put more of a personal spin on it? it should express what it's like to be an freelancer software engineer. in the end write and describe detail video to <freelancer_software_enginneer>.md anything you used from start to final write to this file.
model:
claude-opus-4-6-thinking - max
date: 3/11/2026 - 3:01 PM
"""
import os
import math
import random
import wave
import colorsys
from PIL import Image, ImageDraw, ImageFont, ImageFilter, ImageChops, ImageOps
import numpy as np
# ── Config ──────────────────────────────────────────────────────────────────
W, H = 640, 480          # output frame size in pixels
FPS = 24                 # video frame rate
SAMPLE_RATE = 44100      # audio sample rate (Hz), mono
OUT_DIR = "freelancer_build"                     # working dir for build artifacts
FRAMES_DIR = os.path.join(OUT_DIR, "frames")     # numbered PNG frames for ffmpeg
AUDIO_FILE = os.path.join(OUT_DIR, "audio.wav")  # generated soundtrack
FINAL_VIDEO = "freelancer_ytp.mp4"               # final rendered video filename
os.makedirs(FRAMES_DIR, exist_ok=True)
# Fixed seeds so every run generates the identical video.
random.seed(1337)
np.random.seed(1337)
# ── Helpers ─────────────────────────────────────────────────────────────────
def make_img(bg=(0, 0, 0)):
    """Create a fresh full-frame RGB canvas filled with *bg*."""
    frame_size = (W, H)
    return Image.new("RGB", frame_size, bg)
def get_font(size):
    """Load a monospace TrueType font at *size*.

    Tries Windows console fonts first, then common Linux mono fonts by bare
    name, and finally falls back to PIL's built-in bitmap font.
    """
    windows_fonts = "C:/Windows/Fonts"
    for fname in ("consola.ttf", "cour.ttf", "lucon.ttf"):
        try:
            return ImageFont.truetype(os.path.join(windows_fonts, fname), size)
        except (OSError, IOError):
            continue
    for fname in ("DejaVuSansMono.ttf", "LiberationMono-Regular.ttf"):
        try:
            return ImageFont.truetype(fname, size)
        except (OSError, IOError):
            continue
    return ImageFont.load_default()
def get_bold(size):
    """Load a bold display font at *size*; fall back to the regular mono font."""
    for fname in ("consolab.ttf", "courbd.ttf", "impact.ttf", "arialbd.ttf"):
        candidate = os.path.join("C:/Windows/Fonts", fname)
        try:
            return ImageFont.truetype(candidate, size)
        except (OSError, IOError):
            continue
    return get_font(size)
def get_sans(size):
    """Load a sans-serif UI font at *size*; fall back to the mono font."""
    for fname in ("arial.ttf", "segoeui.ttf", "calibri.ttf"):
        candidate = os.path.join("C:/Windows/Fonts", fname)
        try:
            return ImageFont.truetype(candidate, size)
        except (OSError, IOError):
            continue
    return get_font(size)
def glitch(img, intensity=10):
    """Return a copy of *img* with *intensity* horizontal bands shifted sideways.

    Each band is up to 40 px tall and is displaced left/right by up to 100 px
    with wrap-around — the classic VHS/datamosh glitch look. Every band samples
    from the pristine original (overlapping bands do not compound), matching
    the original implementation.

    Improvement: the per-pixel Python loop (O(intensity*h*W) pixel accesses
    through PIL's PixelAccess) is replaced with one vectorized np.roll per
    band, which produces the identical result for the same random draws.
    """
    src = np.array(img)       # pristine source, never modified
    out = src.copy()          # destination the shifted bands are written into
    for _ in range(intensity):
        y = random.randint(0, H - 1)
        h = random.randint(1, min(40, H - y))
        dx = random.randint(-100, 100)
        # Original semantics: dest[x] = src[(x + dx) % W], i.e. roll left by dx.
        out[y:y + h] = np.roll(src[y:y + h], -dx, axis=1)
    return Image.fromarray(out)
def chromatic(img, offset=5):
    """Fake chromatic aberration: shift red right and blue left by *offset* px."""
    red, green, blue = img.split()
    shifted = (
        ImageChops.offset(red, offset, 0),
        green,
        ImageChops.offset(blue, -offset, 0),
    )
    return Image.merge("RGB", shifted)
def scanlines(img, opacity=60):
    """Darken every third row of *img* to fake CRT scanlines.

    *opacity* (0-255) controls how dark each line is drawn.

    Bug fix: the original drew black lines onto an overlay that was already
    all black (a no-op) and then blended the ENTIRE image toward black, which
    uniformly dimmed the frame and produced no visible line pattern at all.
    Here translucent black lines are alpha-composited directly onto the image,
    so only every third row is darkened.
    """
    base = img.convert("RGBA")
    overlay = Image.new("RGBA", (W, H), (0, 0, 0, 0))
    d = ImageDraw.Draw(overlay)
    for y in range(0, H, 3):
        d.line([(0, y), (W, y)], fill=(0, 0, 0, opacity), width=1)
    return Image.alpha_composite(base, overlay).convert("RGB")
def tv_static(img, amount=0.3):
    """Blend *img* with uniform random RGB noise; *amount* is the noise weight."""
    base = np.array(img).astype(float)
    grain = np.random.randint(0, 256, base.shape, dtype=np.uint8).astype(float)
    mixed = base * (1 - amount) + grain * amount
    return Image.fromarray(mixed.astype(np.uint8))
def color_corrupt(img):
    """Randomly corrupt the image's colors.

    One of three effects is picked: shuffle the RGB channels, invert a single
    random channel, or blend a 40% solid-color tint over the whole frame.
    The tint branch returns early; the other two re-merge the channels.
    """
    channels = list(img.split())
    op = random.choice(["swap", "invert", "tint"])
    if op == "swap":
        random.shuffle(channels)
    elif op == "invert":
        which = random.randint(0, 2)
        channels[which] = ImageOps.invert(channels[which])
    elif op == "tint":
        palette = [(255, 60, 60), (60, 255, 60), (60, 60, 255), (255, 255, 0), (255, 0, 255)]
        shade = random.choice(palette)
        return Image.blend(img, Image.new("RGB", (W, H), shade), 0.4)
    return Image.merge("RGB", channels[:3])
def draw_centered(draw, text, y, font, fill=(255, 255, 255)):
    """Draw *text* horizontally centered on the frame at vertical position *y*."""
    left, _top, right, _bottom = draw.textbbox((0, 0), text, font=font)
    text_width = right - left
    draw.text(((W - text_width) // 2, y), text, font=font, fill=fill)
def draw_shadow(draw, text, x, y, font, fill=(255, 255, 255), shadow=(0, 0, 0)):
    """Draw *text* with a 2px drop shadow behind it (shadow first, then text)."""
    for dx, dy, color in ((2, 2, shadow), (0, 0, fill)):
        draw.text((x + dx, y + dy), text, font=font, fill=color)
def draw_bubble(draw, x, y, w, h, fill=(255, 255, 255), outline=None, radius=12):
    """Draw a rounded-rectangle chat bubble with top-left corner at (x, y)."""
    corners = [(x, y), (x + w, y + h)]
    draw.rounded_rectangle(corners, radius=radius, fill=fill, outline=outline)
def draw_notification(draw, x, y, count, color=(255, 59, 48)):
    """Draw an iOS-style circular notification badge showing *count* at (x, y)."""
    radius = 12
    draw.ellipse([(x, y), (x + 2 * radius, y + 2 * radius)], fill=color)
    label = str(count)
    badge_font = get_bold(14)
    box = draw.textbbox((0, 0), label, font=badge_font)
    half_w = (box[2] - box[0]) // 2
    half_h = (box[3] - box[1]) // 2
    # Center the number inside the circle (1px optical nudge upward).
    draw.text((x + radius - half_w, y + radius - half_h - 1), label,
              font=badge_font, fill=(255, 255, 255))
# ── Audio helpers ───────────────────────────────────────────────────────────
def silence(dur):
    """Return *dur* seconds of zero-valued samples."""
    n_samples = int(SAMPLE_RATE * dur)
    return np.zeros(n_samples)
def sine(freq, dur, vol=0.3):
    """A pure sine tone at *freq* Hz lasting *dur* seconds, scaled by *vol*."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    samples = np.sin(2.0 * np.pi * freq * t)
    return (samples * vol).astype(np.float64)
def square(freq, dur, vol=0.2):
    """A square wave (sign of a sine) at *freq* Hz for *dur* seconds, scaled by *vol*."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    wave_shape = np.sign(np.sin(2.0 * np.pi * freq * t))
    return (wave_shape * vol).astype(np.float64)
def noise(dur, vol=0.4):
    """Uniform white noise in [-vol, vol] lasting *dur* seconds."""
    count = int(SAMPLE_RATE * dur)
    return (np.random.uniform(-1, 1, count) * vol).astype(np.float64)
def distort(audio, gain=3.0):
    """Hard-clip distortion: boost *audio* by *gain*, then clamp to [-0.9, 0.9]."""
    boosted = audio * gain
    return np.clip(boosted, -0.9, 0.9)
def stutter(audio, chunk=0.04, reps=6):
    """YTP stutter: repeat the first *chunk* seconds of *audio* *reps* times.

    Clips shorter than one chunk are returned unchanged. Note the result
    length is chunk*reps regardless of the input length.
    """
    chunk_len = int(SAMPLE_RATE * chunk)
    if len(audio) < chunk_len:
        return audio
    head = audio[:chunk_len]
    return np.tile(head, reps)
def drum_kick(dur=0.15, vol=0.5):
    """Synthesized kick drum: a sine whose pitch and amplitude both decay fast."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    pitch = 150 * np.exp(-30 * t)   # 150 Hz sweeping downward
    amp = np.exp(-15 * t)           # fast amplitude decay
    return (np.sin(2 * np.pi * pitch * t) * amp * vol).astype(np.float64)
def drum_snare(dur=0.1, vol=0.3):
    """Synthesized snare: a decaying 200 Hz body tone mixed with decaying noise."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    body = 0.3 * np.sin(2 * np.pi * 200 * t) * np.exp(-30 * t)
    rattle = 0.5 * np.random.uniform(-1, 1, len(t)) * np.exp(-20 * t)
    return ((body + rattle) * vol).astype(np.float64)
def drum_hihat(dur=0.05, vol=0.2):
    """Synthesized hi-hat: a very short burst of rapidly decaying noise."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    burst = np.random.uniform(-1, 1, len(t)) * np.exp(-60 * t)
    return (burst * vol).astype(np.float64)
def notification_ding(dur=0.3, vol=0.4):
    """Two-note chime: 880 Hz plus a quieter 1320 Hz overtone, decaying together."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    decay = np.exp(-8 * t)
    fundamental = np.sin(2 * np.pi * 880 * t) * decay * vol
    overtone = np.sin(2 * np.pi * 1320 * t) * decay * vol * 0.5
    return (fundamental + overtone).astype(np.float64)
def keyboard_click(dur=0.02, vol=0.3):
    """One tiny keypress tick: noise that decays almost instantly."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    tick = np.random.uniform(-1, 1, len(t)) * np.exp(-200 * t)
    return (tick * vol).astype(np.float64)
def cash_register(dur=0.4, vol=0.3):
    """Cha-ching: two decaying bell tones layered over short mechanical noise."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    bell = 0.3 * np.sin(2 * np.pi * 2000 * t) * np.exp(-5 * t)
    mech = 0.2 * np.random.uniform(-1, 1, len(t)) * np.exp(-10 * t)
    ching = 0.4 * np.sin(2 * np.pi * 3500 * t) * np.exp(-8 * t)
    return ((bell + mech + ching) * vol).astype(np.float64)
def sad_trombone(dur=1.0, vol=0.25):
    """Four descending notes (D4, C#4, C4, A3) — the "wah wah wah waaah".

    Each note fills an equal quarter of the clip; a linear envelope fades the
    whole phrase from full volume down to 30%.
    """
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    notes = [293, 277, 261, 220]
    out = np.zeros_like(t)
    total = len(t)
    for idx, freq in enumerate(notes):
        lo = int(total * idx / len(notes))
        hi = int(total * (idx + 1) / len(notes))
        seg_t = np.linspace(0, (hi - lo) / SAMPLE_RATE, hi - lo, endpoint=False)
        out[lo:hi] = 0.3 * np.sin(2 * np.pi * freq * seg_t)
    fade = np.linspace(1, 0.3, total)
    return (out * fade * vol).astype(np.float64)
def alarm_clock(dur=0.8, vol=0.3):
    """A 1 kHz beep gated on and off 8 times per second, like a digital alarm."""
    t = np.linspace(0.0, dur, int(SAMPLE_RATE * dur), endpoint=False)
    carrier = np.sin(2 * np.pi * 1000 * t)
    # Square gate: 1 while the beep is on, 0 while it is off.
    gate = np.clip(np.sign(np.sin(2 * np.pi * 8 * t)), 0, 1)
    return (carrier * gate * vol).astype(np.float64)
def coffee_pour(dur=0.6, vol=0.15):
    """Bubbly pouring noise: soft hiss plus a slowly warbling tone, faded in/out."""
    n = int(SAMPLE_RATE * dur)
    t = np.linspace(0.0, dur, n, endpoint=False)
    hiss = 0.1 * np.random.uniform(-1, 1, n)
    # Frequency wobbles around 200 Hz at 20 rad/s to suggest bubbling.
    warble = 0.1 * np.sin(2 * np.pi * (200 + 100 * np.sin(20 * t)) * t)
    env = np.ones(n) * 0.5
    fade_in = int(n * 0.1)
    fade_out_start = int(n * 0.8)
    env[:fade_in] = np.linspace(0, 0.5, fade_in)
    env[fade_out_start:] = np.linspace(0.5, 0, n - fade_out_start)
    return ((hiss + warble) * env * vol).astype(np.float64)
def write_wav(filename, samples):
    """Write a float array in [-1, 1] to *filename* as 16-bit mono PCM WAV."""
    clipped = np.clip(samples, -1.0, 1.0)
    pcm = (clipped * 32767).astype(np.int16)
    with wave.open(filename, 'w') as wav_out:
        wav_out.setnchannels(1)   # mono
        wav_out.setsampwidth(2)   # 16-bit samples
        wav_out.setframerate(SAMPLE_RATE)
        wav_out.writeframes(pcm.tobytes())
def pad_audio(audio, target_dur):
    """Force *audio* to exactly *target_dur* seconds: pad with silence or truncate."""
    want = int(SAMPLE_RATE * target_dur)
    have = len(audio)
    if have >= want:
        return audio[:want]
    shortfall = (want - have) / SAMPLE_RATE
    return np.concatenate([audio, silence(shortfall)])
# ── Frame management ────────────────────────────────────────────────────────
# One-element list used as a mutable cell so add_frame can bump the counter
# without a `global` statement.
frame_counter = [0]
# One numpy array per scene; concatenated into the final soundtrack later.
audio_parts = []
def add_frame(img, do_scanline=True):
    """Save *img* as the next numbered PNG frame, optionally with a scanline pass."""
    if do_scanline:
        img = scanlines(img, 35)
    path = os.path.join(FRAMES_DIR, f"frame_{frame_counter[0]:05d}.png")
    img.save(path)
    frame_counter[0] += 1
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 1: ALARM / MORNING BOOT (2.5s = 60 frames)
# Waking up at noon. The "office" is a bed.
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 1: Morning boot...")
# Fonts are loaded once here and reused by every later scene.
f_sm = get_font(13)
f_md = get_font(18)
f_lg = get_font(30)
f_xl = get_bold(48)
f_xxl = get_bold(64)
f_title = get_bold(40)
f_sans = get_sans(16)
f_sans_lg = get_sans(24)
# 60 frames @ 24fps = 2.5s. Alarm blinks for the first 20 frames, then the
# notification avalanche slides in from the right.
for i in range(60):
    # Dark bedroom blue-gray that brightens slightly over the first frames.
    brightness = min(i * 2, 40)
    img = make_img((brightness // 3, brightness // 3, brightness))
    draw = ImageDraw.Draw(img)
    # Clock display
    clock_y = 30
    if i < 20:
        # Alarm blinking: on for 3 frames, dim for 3.
        time_str = "12:47 PM" if i % 6 < 3 else " "
        clock_color = (255, 60, 60) if i % 6 < 3 else (80, 20, 20)
    else:
        time_str = "12:47 PM"
        clock_color = (255, 80, 80)
    clock_font = get_bold(56)
    draw_centered(draw, time_str, clock_y, clock_font, fill=clock_color)
    # "snooze count" climbs by one every 4 frames, capped at 7.
    if i > 8:
        snooze_count = min((i - 8) // 4, 7)
        draw_centered(draw, f"snoozed {snooze_count}x", clock_y + 65, f_md, fill=(180, 100, 100))
    # After snoozing: show notification avalanche
    if i > 30:
        notif_y = 160
        # (app name, message, accent color) — rebuilt each frame.
        notifs = [
            ("Slack", "12 new messages", (74, 21, 75)),
            ("Gmail", "CLIENT: URGENT!!!", (219, 68, 55)),
            ("Upwork", "New job invite", (20, 168, 0)),
            ("GitHub", "PR review requested", (36, 41, 46)),
            ("Jira", "Sprint ended 3 days ago", (0, 82, 204)),
        ]
        # One more card revealed every 4 frames.
        shown = min((i - 30) // 4 + 1, len(notifs))
        for j in range(shown):
            app, msg, color = notifs[j]
            ny = notif_y + j * 44
            # Notification card sliding in from the right, 40 px per frame,
            # staggered by 4 frames per card.
            slide_x = max(0, (W - 40) - (i - 30 - j * 4) * 40)
            slide_x = min(slide_x, W)
            draw.rounded_rectangle([(slide_x + 30, ny), (slide_x + W - 30, ny + 38)],
                                   radius=8, fill=(40, 40, 50), outline=color)
            draw.text((slide_x + 45, ny + 4), app, font=get_bold(13), fill=color)
            draw.text((slide_x + 45, ny + 20), msg, font=f_sm, fill=(200, 200, 200))
    # Chromatic fringe while the alarm blinks; hard glitch right after it stops.
    if i < 20 and i % 6 < 2:
        img = chromatic(img, 4)
    if i == 25 or i == 26:
        img = glitch(img, 15)
    add_frame(img)
# Audio: alarm beeping, then notification dings cascading
scene1_audio = alarm_clock(0.8, 0.35)
scene1_audio = np.concatenate([scene1_audio, silence(0.5)])
# Six dings matching the sliding notification cards.
for j in range(6):
    scene1_audio = np.concatenate([scene1_audio, notification_ding(0.15, 0.25), silence(0.08)])
scene1_audio = pad_audio(scene1_audio, 2.5)
audio_parts.append(scene1_audio)
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 2: THE "OFFICE" (2s = 48 frames)
# Laptop on bed, coffee, cat on keyboard, pajamas
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 2: The office...")
# 48 frames @ 24fps = 2s: laptop-on-bed tableau with a cat paw cameo.
for i in range(48):
    # Warm bedroom tones
    img = make_img((45, 35, 30))
    draw = ImageDraw.Draw(img)
    # Draw a simplified "desk" (it's a bed)
    # Blanket
    draw.rectangle([(0, H - 180), (W, H)], fill=(70, 55, 80))
    # Wrinkles: five shallow arcs across the blanket.
    for w in range(5):
        wy = H - 180 + w * 35
        draw.arc([(w * 130, wy), (w * 130 + 150, wy + 40)], 0, 180, fill=(60, 45, 70), width=2)
    # Laptop shape
    lx, ly = 160, H - 280
    # Screen
    draw.rectangle([(lx, ly), (lx + 300, ly + 180)], fill=(30, 30, 40), outline=(80, 80, 80), width=2)
    # Screen content: fake JS revealed one line per 5 frames.
    code_lines = [
        "function fixBug() {",
        " // TODO: fix later",
        " // TODO: fix later (2)",
        " // FIX THIS BEFORE",
        " // DEMO TOMORROW",
        " return null; // works",
        "}",
    ]
    for ci, cl in enumerate(code_lines):
        if ci < (i // 5):
            # Syntax-ish coloring: comments gray, TODOs yellow, the lie red.
            cc = (0, 200, 100) if "//" not in cl else (120, 120, 120)
            if "TODO" in cl:
                cc = (255, 200, 0)
            if "works" in cl:
                cc = (255, 80, 80)
            draw.text((lx + 15, ly + 12 + ci * 20), cl, font=get_font(12), fill=cc)
    # Keyboard base
    draw.rectangle([(lx - 10, ly + 180), (lx + 310, ly + 200)], fill=(60, 60, 65), outline=(80, 80, 80))
    # Coffee mug
    mx = lx + 330
    my = ly + 100
    draw.rectangle([(mx, my), (mx + 35, my + 45)], fill=(80, 50, 30), outline=(120, 80, 50), width=2)
    draw.arc([(mx + 30, my + 8), (mx + 50, my + 35)], -90, 90, fill=(120, 80, 50), width=2)
    # Steam: two wavering tildes, visible 5 of every 8 frames.
    if i % 8 < 5:
        steam_off = int(3 * math.sin(i * 0.5))
        draw.text((mx + 8 + steam_off, my - 18), "~", font=f_md, fill=(200, 200, 200))
        draw.text((mx + 18 + steam_off, my - 25), "~", font=f_md, fill=(180, 180, 180))
    # Label
    label_alpha = min(i * 8, 255)  # NOTE(review): computed but never used
    if i > 10:
        arrow_text = '"The Office"'
        draw_centered(draw, arrow_text, 25, f_xl, fill=(255, 255, 255))
    # Cat paw appearing on keyboard
    if i > 30:
        paw_x = lx + 80 + int(10 * math.sin(i * 0.4))
        paw_y = ly + 170
        # Paw pad
        draw.ellipse([(paw_x, paw_y), (paw_x + 30, paw_y + 20)], fill=(60, 50, 50))
        for toe in range(3):
            tx = paw_x + 5 + toe * 10
            draw.ellipse([(tx, paw_y - 8), (tx + 8, paw_y + 2)], fill=(60, 50, 50))
        if i > 38:
            # Cat typing gibberish on the line below the code block.
            gibberish = "asdfjkl;qwer" if i % 4 < 2 else "zxcvbnm,./!@"
            draw.text((lx + 15, ly + 12 + 7 * 20), gibberish, font=get_font(12), fill=(255, 100, 100))
    # Occasional color fringing near the end of the scene.
    if i > 35 and random.random() < 0.2:
        img = chromatic(img, 3)
    add_frame(img)
# Audio: keyboard typing + coffee sounds + cat meow-like noise
scene2_audio = []
for j in range(48):
    chunk = 1.0 / FPS  # one frame's worth of audio
    if j > 5 and random.random() < 0.6:
        # A key click, padded with silence to keep the frame duration exact.
        click = keyboard_click(0.015, 0.2)
        pad = max(0, chunk - len(click) / SAMPLE_RATE)
        scene2_audio.append(click)
        if pad > 0:
            scene2_audio.append(silence(pad))
    else:
        scene2_audio.append(silence(chunk))
# Cat keyboard smash sound
scene2_audio.append(noise(0.1, 0.3))
scene2_audio = np.concatenate(scene2_audio)
scene2_audio = pad_audio(scene2_audio, 2.0)
audio_parts.append(scene2_audio)
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 3: CLIENT MESSAGES (3s = 72 frames)
# The horror of vague client requests, YTP stutter style
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 3: Client messages...")
# (sender, text, accent color) — one new bubble revealed every 5 frames.
client_msgs = [
    ("Client", "Hey, quick question", (0, 132, 255)),
    ("Client", "Can you make it pop more?", (0, 132, 255)),
    ("Client", "Like... more modern", (0, 132, 255)),
    ("You", "Could you be more specific?", (0, 200, 80)),
    ("Client", "You know... like Apple", (0, 132, 255)),
    ("Client", "But also like Google", (0, 132, 255)),
    ("Client", "But unique", (0, 132, 255)),
    ("You", "...", (0, 200, 80)),
    ("Client", "Also my nephew knows HTML", (0, 132, 255)),
    ("Client", "He said this should take 1 hour", (0, 132, 255)),
    ("Client", "Can you also add AI to it?", (0, 132, 255)),
    ("Client", "My budget is $50", (0, 132, 255)),
]
stutter_indices = {4, 5, 9, 11}  # messages that get YTP stuttered
# 72 frames @ 24fps = 3s.
for i in range(72):
    # iMessage-style background
    img = make_img((28, 28, 30))
    draw = ImageDraw.Draw(img)
    # Header bar
    draw.rectangle([(0, 0), (W, 50)], fill=(44, 44, 46))
    draw.text((20, 15), "< Messages", font=f_sans, fill=(0, 122, 255))
    draw_centered(draw, "Client (DO NOT ANSWER)", 15, get_bold(16), fill=(255, 255, 255))
    # Show messages up to current frame; keep at most the last 7 on screen.
    msg_idx = min(i // 5, len(client_msgs) - 1)
    visible_start = max(0, msg_idx - 6)
    y_pos = 65
    for j in range(visible_start, msg_idx + 1):
        sender, text, color = client_msgs[j]
        is_you = sender == "You"
        # Stutter effect: only the newest message, only while it is arriving,
        # alternating 2 frames on / 2 frames off.
        if j in stutter_indices and i % 4 < 2 and (i // 5) == j:
            # Cut the text short and repeat fragment
            words = text.split()
            cut = max(1, len(words) // 2)
            text = " ".join(words[:cut]) + "- " + " ".join(words[:cut]) + "-"
        # Bubble dimensions sized to the text, capped at frame width - 100.
        bbox = draw.textbbox((0, 0), text, font=f_sans)
        tw = bbox[2] - bbox[0]
        bw = min(tw + 24, W - 100)
        bh = 34
        if is_you:
            bx = W - bw - 20          # your bubbles: right-aligned, blue
            bubble_color = (0, 122, 255)
            text_color = (255, 255, 255)
        else:
            bx = 20                   # client bubbles: left-aligned, gray
            bubble_color = (58, 58, 60)
            text_color = (255, 255, 255)
        draw.rounded_rectangle([(bx, y_pos), (bx + bw, y_pos + bh)],
                               radius=14, fill=bubble_color)
        draw.text((bx + 12, y_pos + 7), text, font=f_sans, fill=text_color)
        y_pos += bh + 8
    # Typing indicator for client (animated dots, hidden 3 of every 10 frames).
    if i % 10 < 7 and (i // 5) < len(client_msgs) - 1:
        ty = y_pos + 5
        draw.rounded_rectangle([(20, ty), (80, ty + 30)], radius=14, fill=(58, 58, 60))
        dots = "..." if i % 6 < 2 else ".. " if i % 6 < 4 else ". "
        draw.text((30, ty + 3), dots, font=get_bold(18), fill=(150, 150, 150))
    # Budget reveal: red full-frame flash, 3 frames on out of every 8.
    if msg_idx == len(client_msgs) - 1 and i > 55:
        flash = (i - 55) % 8
        if flash < 3:
            img = make_img((180, 0, 0))
            draw = ImageDraw.Draw(img)
            draw_centered(draw, "$50", H // 2 - 40, get_bold(100), fill=(255, 255, 255))
            draw_centered(draw, "FOR THE WHOLE APP", H // 2 + 50, f_lg, fill=(255, 200, 200))
    # YTP glitches at fixed beats, plus random color corruption at the end.
    if i in [24, 25, 45, 46, 60, 61]:
        img = glitch(img, 20)
    if i in [26, 47, 62]:
        img = chromatic(img, 10)
    if i > 60 and random.random() < 0.4:
        img = color_corrupt(img)
    add_frame(img)
# Audio: notification dings, stutter, then sad trombone at $50
scene3_audio = []
for j in range(72):
    chunk = 1.0 / FPS
    if j % 5 == 0 and j < 60:
        # A ding per message arrival, trimmed to exactly one frame.
        ding = notification_ding(chunk, 0.2)
        scene3_audio.append(ding[:int(SAMPLE_RATE * chunk)])
    elif j in [24, 45]:
        # Noise burst aligned with the hard glitch frames.
        burst = noise(chunk, 0.5)
        scene3_audio.append(burst[:int(SAMPLE_RATE * chunk)])
    else:
        scene3_audio.append(silence(chunk))
# Sad trombone for $50
scene3_audio.append(sad_trombone(0.6, 0.3))
scene3_audio = np.concatenate(scene3_audio)
scene3_audio = pad_audio(scene3_audio, 3.0)
audio_parts.append(scene3_audio)
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 4: SCOPE CREEP (3s = 72 frames)
# Requirements doc that grows and mutates uncontrollably
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 4: Scope creep...")
# What the client originally asked for...
original_scope = [
    "[ ] Simple landing page",
    "[ ] Contact form",
    "[ ] Mobile responsive",
]
# ...and the escalating additions, revealed one per 4 frames.
creep_additions = [
    "[ ] User authentication",
    "[ ] Payment processing",
    "[ ] Real-time chat",
    "[ ] Admin dashboard",
    "[ ] Mobile app (iOS + Android)",
    "[ ] AI chatbot integration",
    "[ ] Blockchain verification",
    "[ ] AR/VR experience",
    "[ ] Social media platform",
    "[ ] Operating system",
    "[ ] Cure for cancer",
    "[ ] Time machine",
    "[ ] FIX MY MARRIAGE",
]
# 72 frames @ 24fps = 3s.
for i in range(72):
    # Document background
    img = make_img((250, 245, 235))
    draw = ImageDraw.Draw(img)
    # Document header
    draw.rectangle([(30, 15), (W - 30, 55)], fill=(220, 215, 205))
    draw.text((40, 22), "requirements_FINAL_v23_REAL_FINAL.docx", font=f_sm, fill=(100, 80, 60))
    # Original scope
    y_pos = 70
    draw.text((40, y_pos), "Project Scope:", font=get_bold(18), fill=(40, 40, 40))
    y_pos += 30
    for item in original_scope:
        draw.text((50, y_pos), item, font=f_sm, fill=(60, 60, 60))
        y_pos += 20
    # Creeping additions
    n_creep = min(i // 4, len(creep_additions))
    if n_creep > 0:
        draw.line([(50, y_pos), (W - 50, y_pos)], fill=(200, 50, 50), width=1)
        y_pos += 10
        draw.text((50, y_pos), '"Oh and also..."', font=get_bold(14), fill=(200, 50, 50))
        y_pos += 22
        for j in range(n_creep):
            item = creep_additions[j]
            # Later items get more unhinged colors
            if j < 4:
                color = (180, 80, 0)
            elif j < 8:
                color = (200, 0, 0)
            else:
                color = random.choice([(255, 0, 0), (200, 0, 200), (0, 0, 200)])
            # Shake amplitude grows with how deep into the list we are.
            shake_x = random.randint(-j, j)
            shake_y = random.randint(-max(j // 2, 1), max(j // 2, 1))
            # Items past #6 are drawn bold and progressively larger.
            font_use = f_sm if j < 6 else get_bold(13 + j)
            if y_pos + shake_y < H - 20:
                draw.text((50 + shake_x, y_pos + shake_y), item, font=font_use, fill=color)
            y_pos += 20 + max(0, j - 5) * 2
    # Budget stays the same - stamp, rotated by a fresh random angle each frame.
    if i > 40:
        stamp = Image.new("RGBA", (280, 50), (0, 0, 0, 0))
        sd = ImageDraw.Draw(stamp)
        sd.rectangle([(0, 0), (279, 49)], outline=(255, 0, 0), width=3)
        sd.text((10, 10), "BUDGET: UNCHANGED", font=get_bold(22), fill=(255, 0, 0))
        angle = random.randint(-8, 8)
        stamp = stamp.rotate(angle, expand=True)
        img.paste(stamp, (180, 50), stamp)
    # Deadline counter getting closer: 14 days down to 0, one day per 3 frames.
    if i > 20:
        days = max(0, 14 - (i - 20) // 3)
        deadline_color = (0, 150, 0) if days > 7 else (255, 150, 0) if days > 3 else (255, 0, 0)
        draw.rectangle([(W - 200, H - 45), (W - 10, H - 10)], fill=(40, 40, 40), outline=deadline_color, width=2)
        draw.text((W - 190, H - 40), f"DEADLINE: {days} days", font=get_bold(14), fill=deadline_color)
    if n_creep > 8 and random.random() < 0.25:
        img = glitch(img, 15)
    if n_creep > 10:
        img = chromatic(img, random.randint(2, 8))
    add_frame(img)
# Audio: each addition gets a descending tone + occasional noise stab
scene4_audio = []
base_freq = 600
for j in range(72):
    chunk = 1.0 / FPS
    if j % 4 == 0 and j < 56:
        freq = max(100, base_freq - (j // 4) * 30)  # descending dread
        tone = sine(freq, min(0.08, chunk), 0.25)
        scene4_audio.append(tone[:int(SAMPLE_RATE * chunk)])
        pad = max(0, chunk - len(tone) / SAMPLE_RATE)
        if pad > 0:
            scene4_audio.append(silence(pad))
    elif j > 56:
        # Rising noise floor for the final unhinged stretch.
        scene4_audio.append(noise(chunk, 0.1 + (j - 56) * 0.02))
    else:
        scene4_audio.append(silence(chunk))
audio_parts.append(pad_audio(np.concatenate(scene4_audio), 3.0))
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 5: 3 AM DEBUGGING (3s = 72 frames)
# Alone in darkness. Stack traces. Madness.
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 5: 3 AM debugging...")
# Fake traceback revealed line by line during the first half of the scene.
stack_trace = [
    "Traceback (most recent call last):",
    ' File "app.py", line 847, in handler',
    " result = process_payment(user, cart)",
    ' File "payment.py", line 203, in process_payment',
    " token = stripe.charge(amount=total)",
    ' File "stripe.py", line 42, in charge',
    " raise PaymentError('lol no')",
    "PaymentError: lol no",
    "",
    "# THIS WORKED 5 MINUTES AGO",
    "# I DIDN'T CHANGE ANYTHING",
    "# WHY",
]
# Search queries shown in the second half, one per 5 frames.
search_queries = [
    "python PaymentError lol no",
    "stripe charge error 2024",
    "why does code hate me",
    "stackoverflow paymentError",
    "is programming real",
    "freelance developer burnout",
    "career change options 3am",
    "how to become a farmer",
]
# 72 frames @ 24fps = 3s.
for i in range(72):
    # Dark with terminal glow
    img = make_img((8, 8, 12))
    draw = ImageDraw.Draw(img)
    # Clock in corner: 3:XX AM, advancing one minute per 5 frames.
    minute = 14 + i // 5
    draw.text((W - 110, 10), f"3:{minute:02d} AM", font=f_md, fill=(255, 60, 60))
    # Coffee count: one "C" per cup, overflow shown as "+N".
    coffees = 4 + i // 15
    coffee_str = "C" * min(coffees, 12) + ("+" + str(coffees - 12) if coffees > 12 else "")
    draw.text((10, 10), f"[{coffee_str}]", font=f_sm, fill=(139, 90, 43))
    if i < 35:
        # Stack trace appearing, one line per 2 frames.
        lines_show = min(i // 2 + 1, len(stack_trace))
        for j in range(lines_show):
            line = stack_trace[j]
            if line.startswith("#"):
                color = (255, 100, 100)
            elif "Error" in line or "raise" in line:
                color = (255, 60, 60)
            elif line.startswith(" File"):
                color = (100, 180, 255)
            elif line.startswith("Traceback"):
                color = (255, 255, 100)
            else:
                color = (180, 180, 180)
            draw.text((15, 40 + j * 18), line, font=get_font(12), fill=color)
        # Cursor blinking
        if i % 6 < 3:
            cursor_y = 40 + lines_show * 18
            draw.text((15, cursor_y), ">>> _", font=get_font(12), fill=(0, 255, 0))
    else:
        # Increasingly desperate Google searches
        search_idx = min((i - 35) // 5, len(search_queries) - 1)
        query = search_queries[search_idx]
        # Search bar
        draw.rounded_rectangle([(40, 50), (W - 40, 85)], radius=20, fill=(255, 255, 255))
        draw.text((60, 57), query, font=f_sans, fill=(40, 40, 40))
        # Search icon
        draw.ellipse([(W - 75, 57), (W - 60, 72)], outline=(100, 100, 100), width=2)
        # "Results" revealed one per 3 frames.
        results_text = [
            "Stack Overflow - closed as duplicate (2014)",
            "GitHub Issue #4392 - wontfix",
            "Reddit: 'just use a different library lol'",
            "Medium: '10 WAYS TO FIX THIS' (paywall)",
        ]
        for ri, rt in enumerate(results_text[:min((i - 35) // 3, 4)]):
            ry = 110 + ri * 50
            draw.text((50, ry), rt, font=f_sans, fill=(100, 150, 255))
            draw.text((50, ry + 20), "This doesn't help at all.", font=get_font(11), fill=(120, 120, 120))
    # Sanity/stress bar: fills and shifts green -> red over the scene.
    stress = min(i / 72, 1.0)
    bar_w = int(stress * (W - 100))
    bar_color = (int(255 * stress), int(255 * (1 - stress)), 0)
    draw.rectangle([(50, H - 30), (50 + bar_w, H - 15)], fill=bar_color)
    draw.rectangle([(50, H - 30), (W - 50, H - 15)], outline=(80, 80, 80), width=1)
    draw.text((52, H - 46), "SANITY:", font=get_font(11), fill=(150, 150, 150))
    if i > 50 and random.random() < 0.3:
        img = glitch(img, 12)
    if i > 60 and random.random() < 0.3:
        img = chromatic(img, 6)
    add_frame(img)
# Audio: eerie ambience + keyboard + increasing heartbeat
scene5_audio = []
for j in range(72):
    chunk = 1.0 / FPS
    t = np.linspace(0, chunk, int(SAMPLE_RATE * chunk), endpoint=False)
    # Low drone
    drone = np.sin(2 * np.pi * 50 * t) * 0.08
    # Heartbeat getting faster: the sine is thresholded into 60 Hz pulses.
    beat_freq = 1.0 + j * 0.03
    heartbeat = np.sin(2 * np.pi * beat_freq * t) * 0.02
    heartbeat = np.where(heartbeat > 0.01, 0.15, 0) * np.sin(2 * np.pi * 60 * t)
    # Random keystrokes spliced into the drone.
    # NOTE(review): noise() recomputes its length as int(SAMPLE_RATE * (len/SAMPLE_RATE)),
    # which can round down by one sample and mismatch the slice width — verify
    # this never raises a broadcast error in practice.
    if random.random() < 0.4:
        click_pos = random.randint(0, max(1, len(t) - 500))
        drone[click_pos:click_pos + min(400, len(t) - click_pos)] += noise(min(400, len(t) - click_pos) / SAMPLE_RATE, 0.15)
    scene5_audio.append(drone + heartbeat)
audio_parts.append(pad_audio(np.concatenate(scene5_audio), 3.0))
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 6: INVOICE HELL (2.5s = 60 frames)
# Sending invoices into the void. "Payment pending" forever.
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 6: Invoice hell...")
# 60 frames @ 24fps = 2.5s: an invoice is built up, then status stamps decay
# from SENT to PAST DUE while the payment terms stretch to "NET LOL".
for i in range(60):
    img = make_img((245, 245, 250))
    draw = ImageDraw.Draw(img)
    # Invoice document
    draw.rectangle([(60, 20), (W - 60, H - 20)], fill=(255, 255, 255), outline=(200, 200, 200), width=1)
    # Header
    draw.text((80, 35), "INVOICE #00847", font=get_bold(28), fill=(40, 40, 40))
    draw.line([(80, 70), (W - 80, 70)], fill=(200, 200, 200), width=1)
    # Line items, revealed one per 8 frames.
    items = [
        ("Landing page design & dev", "$2,500"),
        ("'Quick' revisions (x47)", "$3,200"),
        ("Weekend emergency fix", "$800"),
        ("Unpaid 'discovery call' (2hrs)", "$0"),
        ("Therapy (work-related)", "$400"),
    ]
    y = 85
    for idx, (desc, price) in enumerate(items):
        if idx <= i // 8:
            draw.text((80, y), desc, font=f_sm, fill=(60, 60, 60))
            draw.text((W - 150, y), price, font=f_sm, fill=(40, 40, 40))
            y += 25
    # Total
    if i > 25:
        draw.line([(80, y + 5), (W - 80, y + 5)], fill=(40, 40, 40), width=2)
        draw.text((80, y + 12), "TOTAL:", font=get_bold(20), fill=(40, 40, 40))
        draw.text((W - 170, y + 12), "$6,900", font=get_bold(20), fill=(40, 40, 40))
    # Status stamps cycling, one status per 3 frames, at a random spot/angle.
    if i > 35:
        statuses = [
            ("SENT", (0, 100, 200)),
            ("VIEWED", (200, 150, 0)),
            ("IGNORED", (200, 100, 0)),
            ("FOLLOW-UP SENT", (200, 80, 0)),
            ("READ: NO REPLY", (200, 50, 0)),
            ("PAST DUE", (220, 0, 0)),
            ("PAST DUE (90 DAYS)", (255, 0, 0)),
        ]
        stat_idx = min((i - 35) // 3, len(statuses) - 1)
        stat_text, stat_color = statuses[stat_idx]
        stamp = Image.new("RGBA", (350, 60), (0, 0, 0, 0))
        sd = ImageDraw.Draw(stamp)
        sd.rectangle([(0, 0), (349, 59)], outline=stat_color, width=4)
        sd.text((15, 12), stat_text, font=get_bold(28), fill=stat_color)
        angle = random.randint(-12, 12)
        stamp = stamp.rotate(angle, expand=True)
        sx = random.randint(100, 250)
        sy = random.randint(100, 250)
        img.paste(stamp, (sx, sy), stamp)
    # "NET 30" → "NET 60" → "NET NEVER", advancing one term per 2 frames.
    if i > 45:
        terms = ["NET 30", "NET 60", "NET 90", "NET ??", "NET NEVER", "NET LOL"]
        t_idx = min((i - 45) // 2, len(terms) - 1)
        draw.text((80, H - 50), f"Payment terms: {terms[t_idx]}", font=f_sm,
                  fill=(255, 0, 0) if t_idx > 2 else (100, 100, 100))
    if i > 50 and random.random() < 0.3:
        img = glitch(img, 10)
    add_frame(img)
# Audio: cash register that never rings, then sad descending tones
scene6_audio = []
for j in range(60):
    chunk = 1.0 / FPS
    if j == 10:
        cr = cash_register(chunk, 0.2)
        scene6_audio.append(cr[:int(SAMPLE_RATE * chunk)])
    elif j > 35 and j % 3 == 0:
        # Descending sad tone for each status
        freq = 400 - (j - 35) * 10
        tone = sine(max(freq, 80), chunk, 0.2)
        scene6_audio.append(tone[:int(SAMPLE_RATE * chunk)])
    else:
        scene6_audio.append(silence(chunk))
scene6_audio.append(sad_trombone(0.5, 0.25))
audio_parts.append(pad_audio(np.concatenate(scene6_audio), 2.5))
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 7: THE MEETING THAT COULD HAVE BEEN AN EMAIL (2.5s = 60 frames)
# A "15 min sync" that devours your entire afternoon
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 7: The meeting...")
for i in range(60):
# Zoom-call style background
img = make_img((25, 25, 35))
draw = ImageDraw.Draw(img)
# Zoom UI header
draw.rectangle([(0, 0), (W, 36)], fill=(35, 35, 35))
elapsed_min = 15 + i * 2 # Time RAPIDLY escalating
hrs = elapsed_min // 60
mins = elapsed_min % 60
time_color = (255, 255, 255) if elapsed_min < 30 else (255, 200, 0) if elapsed_min < 60 else (255, 0, 0)
draw.text((W // 2 - 50, 8), f"{hrs:01d}:{mins:02d}:00", font=get_bold(18), fill=time_color)
draw.text((15, 10), "Zoom Meeting", font=f_sm, fill=(150, 150, 150))
# Meeting title
if elapsed_min < 30:
title = '"Quick 15-min sync"'
elif elapsed_min < 60:
title = '"Quick 45-min sync"'
elif elapsed_min < 90:
title = '"Actually let me share my screen"'
else:
title = '"One more thing..."'
draw_centered(draw, title, 42, f_sans, fill=(180, 180, 180))
# Participant grid - 4 boxes
grid_w, grid_h = (W - 30) // 2, (H - 100) // 2
participants = [
("You (dying inside)", (60, 60, 70)),
("Client (sharing screen)", (50, 50, 65)),
("Client's boss (just listening)", (55, 55, 60)),
("Someone's kid (background)", (50, 60, 55)),
]
for pi, (name, bg) in enumerate(participants):
px = 10 + (pi % 2) * (grid_w + 10)
py = 65 + (pi // 2) * (grid_h + 10)
draw.rectangle([(px, py), (px + grid_w, py + grid_h)], fill=bg, outline=(60, 60, 60), width=1)
# Simple face in each box
cx = px + grid_w // 2
cy = py + grid_h // 2 - 10
face_r = 25
# Head
draw.ellipse([(cx - face_r, cy - face_r), (cx + face_r, cy + face_r)],
fill=(210, 180, 140), outline=(180, 150, 110))
# Eyes
eye_l = (cx - 10, cy - 5)
eye_r = (cx + 5, cy - 5)
if pi == 0: # You - increasingly dead-eyed
# Eyes getting more droopy
droop = min(i // 10, 5)
draw.ellipse([(eye_l[0], eye_l[1] + droop), (eye_l[0] + 6, eye_l[1] + 6 + droop)], fill=(40, 40, 40))
draw.ellipse([(eye_r[0], eye_r[1] + droop), (eye_r[0] + 6, eye_r[1] + 6 + droop)], fill=(40, 40, 40))
# Mouth: flat line getting sadder
mouth_droop = min(i // 8, 8)
draw.arc([(cx - 10, cy + 8), (cx + 10, cy + 15 + mouth_droop)], 0, 180, fill=(120, 60, 60), width=2)
elif pi == 3: # Kid - bouncing
bounce = int(5 * math.sin(i * 0.8))
draw.ellipse([(eye_l[0], eye_l[1] + bounce), (eye_l[0] + 6, eye_l[1] + 6 + bounce)], fill=(40, 40, 40))
draw.ellipse([(eye_r[0], eye_r[1] + bounce), (eye_r[0] + 6, eye_r[1] + 6 + bounce)], fill=(40, 40, 40))
draw.arc([(cx - 8, cy + 10 + bounce), (cx + 8, cy + 18 + bounce)], 0, 180, fill=(200, 100, 100), width=2)
else:
draw.ellipse([(eye_l[0], eye_l[1]), (eye_l[0] + 6, eye_l[1] + 6)], fill=(40, 40, 40))
draw.ellipse([(eye_r[0], eye_r[1]), (eye_r[0] + 6, eye_r[1] + 6)], fill=(40, 40, 40))
# Talking mouth
mouth_open = 3 + int(3 * abs(math.sin(i * 0.6)))
draw.ellipse([(cx - 6, cy + 10), (cx + 6, cy + 10 + mouth_open)], fill=(150, 80, 80))
# Name label
draw.text((px + 5, py + grid_h - 18), name, font=get_font(10), fill=(255, 255, 255))
# "This meeting could have been an email" watermark growing
if i > 35:
watermark_alpha = min((i - 35) * 10, 200)
wm_color = (watermark_alpha, watermark_alpha // 3, watermark_alpha // 3)
wm_font = get_bold(14 + (i - 35) // 2)
draw_centered(draw, "THIS COULD HAVE BEEN AN EMAIL", H - 35, wm_font, fill=wm_color)
if i > 45 and random.random() < 0.3:
img = glitch(img, 8)
add_frame(img)
# Audio bed for scene 7: a low, muffled "meeting voice" murmur with
# random bursts of static, capped by a single "you're on mute" beep.
frame_len = 1.0 / FPS
t = np.linspace(0, frame_len, int(SAMPLE_RATE * frame_len), endpoint=False)
# The droning two-tone carrier is identical every frame; build it once.
murmur = (np.sin(2 * np.pi * 180 * t) * 0.05
          + np.sin(2 * np.pi * 250 * t * (1 + 0.3 * np.sin(5 * t))) * 0.04)
meeting_chunks = []
for _ in range(60):
    segment = murmur
    # ~30% of frames pick up a burst of line noise (same RNG cadence as before).
    if random.random() < 0.3:
        segment = segment + noise(frame_len, 0.02)
    meeting_chunks.append(segment)
# Closing "you're on mute" beep.
meeting_chunks.append(sine(600, 0.1, 0.3))
audio_parts.append(pad_audio(np.concatenate(meeting_chunks), 2.5))
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 8: IMPOSTER SYNDROME (3s = 72 frames)
# LinkedIn vs Reality. "Full-stack ninja" vs googling "how to center a div"
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 8: Imposter syndrome...")
# Buzzword bios for the polished "LinkedIn" half; one is shown at a time.
linkedin_bio = [
    "FULL-STACK NINJA",
    "10x ENGINEER",
    "AI / ML / BLOCKCHAIN",
    "THOUGHT LEADER",
    "DISRUPTING THE SPACE",
    "SERIAL ENTREPRENEUR",
]
# The "Reality" half reveals one of these captions roughly every 10 frames.
reality = [
    "googling: 'how to center a div'",
    "googling: 'javascript sort numbers'",
    "googling: 'git undo everything'",
    "googling: 'what is a monad'",
    "copy-pasting from Stack Overflow",
    "asking ChatGPT to fix ChatGPT's code",
]
for i in range(72):
    img = make_img((0, 0, 0))
    draw = ImageDraw.Draw(img)
    # Split screen: LinkedIn left, Reality right
    mid = W // 2
    # Left: LinkedIn (polished blue)
    draw.rectangle([(0, 0), (mid - 2, H)], fill=(0, 50, 100))
    draw.text((15, 15), "LinkedIn", font=get_bold(22), fill=(0, 119, 181))
    # Profile pic (simple avatar)
    draw.ellipse([(mid // 2 - 30, 55), (mid // 2 + 30, 115)], fill=(200, 170, 140))
    draw.ellipse([(mid // 2 - 20, 70), (mid // 2 + 20, 105)], fill=(210, 180, 150))
    # Bio cycling: new buzzword every 8 frames; font size shrinks for long bios
    bio_idx = (i // 8) % len(linkedin_bio)
    bio = linkedin_bio[bio_idx]
    draw_shadow(draw, bio, 20, 130, get_bold(min(18, 300 // max(len(bio), 1))),
                fill=(255, 255, 255), shadow=(0, 30, 60))
    # Endorsements appear after ~0.8s
    if i > 20:
        draw.text((20, 165), "1,247 connections", font=f_sm, fill=(150, 200, 255))
        draw.text((20, 185), "99+ endorsements", font=f_sm, fill=(150, 200, 255))
        draw.text((20, 205), '"Open to opportunities"', font=f_sm, fill=(100, 255, 100))
    # Right: Reality (dark terminal)
    draw.rectangle([(mid + 2, 0), (W, H)], fill=(15, 15, 20))
    draw.text((mid + 15, 15), "Reality", font=get_bold(22), fill=(255, 80, 80))
    # Reality captions advance every 10 frames and then hold on the last one
    reality_idx = min(i // 10, len(reality) - 1)
    reality_text = reality[reality_idx]
    draw.text((mid + 15, 60), reality_text, font=get_font(12), fill=(180, 180, 180))
    # Browser tab showing embarrassing searches (one more tab every 10 frames)
    if i > 15:
        tab_y = 90
        for ri in range(min((i - 15) // 10 + 1, 4)):
            tab_text = ["Stack Overflow - Visited 847 times today",
                        "W3Schools - How to HTML",
                        "YouTube - 'CSS Flexbox in 100 seconds'",
                        "Reddit - r/learnprogramming"][ri]
            draw.text((mid + 15, tab_y + ri * 22), tab_text, font=get_font(10), fill=(120, 120, 140))
    # Hours spent on each (the damning time breakdown)
    if i > 40:
        draw.text((mid + 15, 200), "Time coding today: 1hr", font=f_sm, fill=(100, 100, 100))
        draw.text((mid + 15, 218), "Time on YouTube: 5hrs", font=f_sm, fill=(255, 100, 100))
        draw.text((mid + 15, 236), "Time on Twitter: 3hrs", font=f_sm, fill=(100, 150, 255))
    # Divider line glitching: starts jittering after frame 40, turns yellow after 50
    div_x = mid + random.randint(-3, 3) if i > 40 else mid
    draw.line([(div_x, 0), (div_x, H)], fill=(255, 255, 0) if i > 50 else (100, 100, 100), width=2)
    # "THEY'RE GOING TO FIND OUT" text appearing — grows and reddens each frame
    if i > 55:
        alpha = min((i - 55) * 20, 255)
        fear_color = (alpha, 0, 0)
        fear_size = 18 + (i - 55)
        draw_centered(draw, "THEY'RE GOING TO FIND OUT", H - 60, get_bold(min(fear_size, 36)), fill=fear_color)
    # Escalating YTP degradation toward the end of the scene
    if i > 55 and random.random() < 0.4:
        img = glitch(img, 15)
    if i > 60 and random.random() < 0.3:
        img = chromatic(img, 8)
    if i > 65:
        img = tv_static(img, 0.08)
    add_frame(img)
# Scene 8 audio: chirpy "corporate elevator music" melody undercut by an
# 80 Hz anxiety drone whose level creeps upward as the scene progresses.
# (Left/right channel concept, but output is mono so they are mixed.)
frame_len = 1.0 / FPS
t = np.linspace(0, frame_len, int(SAMPLE_RATE * frame_len), endpoint=False)
melody_notes = [523, 587, 659, 523, 587, 659, 784, 659, 587, 523]  # C major ascending
dread = np.sin(2 * np.pi * 80 * t)  # the anxiety undertone, scaled per frame
scene8_chunks = [
    np.sin(2 * np.pi * melody_notes[j % len(melody_notes)] * t) * 0.08
    + dread * (j / 200)
    for j in range(72)
]
audio_parts.append(pad_audio(np.concatenate(scene8_chunks), 3.0))
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 9: THE GRIND MONTAGE (2.5s = 60 frames)
# Rapid-fire day-in-the-life: code, coffee, code, cry, code, repeat
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 9: The grind montage...")
# One card per day of the week: (label, headline, punchline, background color).
montage_cards = [
    ("MON", "Fix 'small' bug", "(8 hours later)", (100, 100, 180)),
    ("TUE", "Client: 'one tiny change'", "rebuild everything", (180, 100, 100)),
    ("WED", "Deploy to production", "everything is on fire", (180, 50, 50)),
    ("THU", "4 meetings", "0 lines of code", (100, 100, 100)),
    ("FRI", "Finally coding!", "'quick call at 4pm'", (80, 150, 80)),
    ("SAT", "'just checking slack'", "(works until 2am)", (150, 100, 180)),
    ("SUN", "'rest day'", "(opens laptop at noon)", (60, 60, 80)),
    ("REPEAT", "REPEAT", "REPEAT", (200, 0, 0)),
]
for i in range(60):
    # Hold each card for 7 frames; the final "REPEAT" card absorbs the remainder.
    card_idx = min(i // 7, len(montage_cards) - 1)
    day, line1, line2, bg = montage_cards[card_idx]
    img = make_img(bg)
    draw = ImageDraw.Draw(img)
    # Day label
    draw_centered(draw, day, 60, get_bold(72), fill=(255, 255, 255))
    # Subtitle
    draw_centered(draw, line1, 180, f_lg, fill=(255, 255, 255))
    draw_centered(draw, line2, 230, f_md, fill=(200, 200, 200))
    # Progress bar for the week (fills completely just before the scene ends)
    week_progress = min(i / 56, 1.0)
    bar_y = H - 50
    draw.rectangle([(40, bar_y), (W - 40, bar_y + 15)], outline=(200, 200, 200), width=1)
    draw.rectangle([(40, bar_y), (40 + int((W - 80) * week_progress), bar_y + 15)],
                   fill=(255, 200, 0))
    draw.text((W // 2 - 30, bar_y + 18), "THE GRIND", font=f_sm, fill=(200, 200, 200))
    # Git commit counter ticks up 3 per frame
    if i > 10:
        commits = i * 3
        draw.text((W - 140, 15), f"commits: {commits}", font=f_sm, fill=(0, 200, 0))
    # Flash transition between days
    if i % 7 == 0 and i > 0:
        img = tv_static(img, 0.5)
    # The final "REPEAT" card glitches constantly
    if card_idx == len(montage_cards) - 1:
        img = glitch(img, 20)
    # Random color corruption throughout the montage
    if random.random() < 0.4:
        img = color_corrupt(img)
    add_frame(img)
# Scene 9 audio: an accelerating drum loop (kick / hat / snare / hat),
# with each hit padded out to exactly one video frame of audio so the
# track stays in sync with the 24 fps montage.
scene9_audio = []
bpm = 120
frame_len = 1.0 / FPS
for j in range(60):
    # Tempo ramps up as the week grinds on: 120 bpm + 2 bpm per frame.
    beat_dur = 60.0 / (bpm + j * 2)
    if j % 4 == 0:
        hit = drum_kick(min(beat_dur, 0.15), 0.4)
    elif j % 4 == 2:
        hit = drum_snare(min(beat_dur, 0.1), 0.3)
    else:
        # j is odd here: every even j was already handled by the kick/snare
        # branches, so the original `silence(0.02)` fallback was unreachable
        # dead code and has been removed.
        hit = drum_hihat(0.04, 0.15)
    scene9_audio.append(hit)
    # Top up with silence so each frame contributes exactly 1/FPS seconds.
    remaining = frame_len - len(hit) / SAMPLE_RATE
    if remaining > 0:
        scene9_audio.append(silence(remaining))
audio_parts.append(pad_audio(np.concatenate(scene9_audio), 2.5))
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 10: FREEDOM? (2s = 48 frames)
# The bittersweet "but at least I'm my own boss" moment
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 10: Freedom moment...")
# Lines fade in one at a time over the sunset; parenthesized lines are
# the sardonic asides and are rendered smaller and dimmer below.
freedom_lines = [
    "but hey...",
    "no commute",
    "no dress code",
    "no boss looking over my shoulder",
    "(just 7 clients doing it instead)",
    "",
    "I set my own hours",
    "(all of them)",
    "",
    "I'm my own boss",
    "(and my own HR, accountant,",
    " marketer, janitor, and therapist)",
]
for i in range(48):
    # Sunset gradient background — warms from purple toward orange over the scene
    t_norm = i / 48
    r = int(40 + 60 * t_norm)
    g = int(20 + 40 * t_norm)
    b = int(60 + 40 * (1 - t_norm))
    img = make_img((r, g, b))
    draw = ImageDraw.Draw(img)
    # Draw horizon
    horizon_y = int(H * 0.6)
    # Sky gradient: per-row blend from the base color toward sunset tones
    for row in range(horizon_y):
        pct = row / horizon_y
        sr = int(r * (1 - pct) + 180 * pct * t_norm)
        sg = int(g * (1 - pct) + 100 * pct * t_norm)
        sb = int(b * (1 - pct) + 40 * pct)
        draw.line([(0, row), (W, row)], fill=(min(sr, 255), min(sg, 255), min(sb, 255)))
    # Sun sinking toward the horizon, drawn with concentric glow rings
    sun_y = int(horizon_y - 30 + 15 * t_norm)
    sun_x = W // 2
    for ring in range(5, 0, -1):
        sun_alpha = 80 + ring * 30
        draw.ellipse([(sun_x - ring * 15, sun_y - ring * 15),
                      (sun_x + ring * 15, sun_y + ring * 15)],
                     fill=(min(sun_alpha + 150, 255), min(sun_alpha + 80, 255), sun_alpha // 2))
    draw.ellipse([(sun_x - 25, sun_y - 25), (sun_x + 25, sun_y + 25)], fill=(255, 200, 80))
    # Laptop silhouette at horizon
    draw.rectangle([(sun_x - 40, horizon_y - 5), (sun_x + 40, horizon_y + 30)], fill=(20, 20, 30))
    draw.rectangle([(sun_x - 55, horizon_y + 30), (sun_x + 55, horizon_y + 35)], fill=(20, 20, 30))
    # Tiny screen glow
    draw.rectangle([(sun_x - 35, horizon_y), (sun_x + 35, horizon_y + 25)], fill=(50, 60, 80))
    # Text lines fading in — one new line revealed every 4 frames; empty
    # entries in freedom_lines still advance y_pos to create blank gaps
    line_idx = min(i // 4, len(freedom_lines) - 1)
    y_pos = 20
    for li in range(line_idx + 1):
        if li < len(freedom_lines):
            line = freedom_lines[li]
            if line:
                is_paren = line.startswith("(")
                color = (180, 180, 200) if is_paren else (255, 255, 255)
                font_use = f_sm if is_paren else f_md
                draw_centered(draw, line, y_pos, font_use, fill=color)
            y_pos += 22
    # Occasional mild chromatic aberration keeps the YTP texture alive
    if random.random() < 0.1:
        img = chromatic(img, 3)
    add_frame(img)
# Scene 10 audio: a gentle plucked arpeggio over a four-chord loop
# (C / Am / Bdim-ish / C) — bittersweet, to match the sunset.
chord_freqs = [
    (261, 329, 392),  # C major
    (220, 277, 329),  # A minor
    (246, 311, 370),  # B diminished-ish
    (261, 329, 392),  # C major resolve
]
frame_len = 1.0 / FPS
t = np.linspace(0, frame_len, int(SAMPLE_RATE * frame_len), endpoint=False)
env = np.exp(-t * 3)  # fast exponential decay gives each note a plucked feel
arpeggio = []
for j in range(48):
    # One chord per 12 frames (half a second), cycling through its three tones.
    freq = chord_freqs[(j // 12) % len(chord_freqs)][j % 3]
    pluck = np.sin(2 * np.pi * freq * t) * env * 0.15
    pluck = pluck + np.sin(2 * np.pi * freq * 2 * t) * env * 0.05  # octave shimmer
    arpeggio.append(pluck)
audio_parts.append(pad_audio(np.concatenate(arpeggio), 2.0))
# ═══════════════════════════════════════════════════════════════════════════
# SCENE 11: CRASH + TITLE CARD (3s = 72 frames)
# ═══════════════════════════════════════════════════════════════════════════
print("Scene 11: Crash and title...")
# 1 second of chaotic static with flashing freelance trauma
trauma_words = ["SCOPE CREEP", "$50", "URGENT", "NET NEVER", "3 AM",
                "DEPLOY", "IT'S BROKEN", "INVOICE", "SYNC?", "ASAP",
                "REVISION 47", "1 HOUR", "UNPAID", "PIVOT"]
for i in range(24):
    img = make_img((0, 0, 0))
    # Heavy static (60–90% coverage) so the crash reads as total signal loss
    img = tv_static(img, 0.6 + random.random() * 0.3)
    if random.random() < 0.6:
        img = color_corrupt(img)
    draw = ImageDraw.Draw(img)
    # Roughly every other frame, stamp a random trauma word at a random spot
    if random.random() < 0.5:
        word = random.choice(trauma_words)
        f = get_bold(random.randint(24, 72))
        x, y = random.randint(0, W - 100), random.randint(0, H - 50)
        colors = [(255, 60, 60), (255, 200, 0), (0, 200, 255), (255, 0, 255), (0, 255, 100)]
        draw.text((x, y), word, font=f, fill=random.choice(colors))
    img = glitch(img, 20)
    add_frame(img)
# 2 seconds: fade to title
for i in range(48):
    img = make_img((0, 0, 0))
    draw = ImageDraw.Draw(img)
    # Overall fade-in completes over the first 18 frames
    fade = min(i / 18, 1.0)
    alpha = int(255 * fade)
    if i > 3:
        # NOTE(review): `color` is assigned but never used — the draw call
        # below computes its own greenish fill from alpha directly.
        color = (alpha, alpha, alpha)
        draw_centered(draw, "git commit -m", H // 2 - 80, f_lg, fill=(int(alpha * 0.4), int(alpha * 0.7), int(alpha * 0.4)))
    if i > 8:
        title_color = (alpha, alpha, alpha)
        draw_centered(draw, '"surviving"', H // 2 - 30, f_xxl, fill=title_color)
    if i > 18:
        sub_alpha = min((i - 18) * 12, 180)
        draw_centered(draw, "a youtube poop about freelancing", H // 2 + 50, f_md,
                      fill=(sub_alpha, sub_alpha, sub_alpha))
    if i > 25:
        sub2_alpha = min((i - 25) * 10, 120)
        draw_centered(draw, "no clients were harmed in the making of this video", H // 2 + 85, f_sm,
                      fill=(sub2_alpha, sub2_alpha, sub2_alpha))
        draw_centered(draw, "(their invoices were though)", H // 2 + 102, f_sm,
                      fill=(sub2_alpha, sub2_alpha // 2, sub2_alpha // 2))
    # Cursor blink (4 frames on, 4 frames off)
    if i > 30 and (i % 8) < 4:
        draw.text((W // 2 - 5, H - 50), "_", font=f_md, fill=(0, 180, 0))
    # Residual static during the first few frames of the fade
    if i < 8 and random.random() < 0.4:
        img = tv_static(img, 0.08)
    add_frame(img)
# Scene 11 audio: 0.6 s of distorted static (the crash) followed by a low
# two-tone drone that fades linearly to silence under the title card.
crash = distort(noise(0.6, 0.8), 2.5)
n_drone = int(SAMPLE_RATE * 2.4)
t_drone = np.linspace(0, 2.4, n_drone, endpoint=False)
drone = (np.sin(2 * np.pi * 65 * t_drone) * 0.2
         + np.sin(2 * np.pi * 98 * t_drone) * 0.1)
drone *= np.linspace(1.0, 0.0, n_drone)  # fade out
final_audio = np.concatenate([crash, drone])
audio_parts.append(pad_audio(final_audio, 3.0))
# ═══════════════════════════════════════════════════════════════════════════
# WRITE AUDIO + REPORT
# ═══════════════════════════════════════════════════════════════════════════
print("Writing audio...")
# Stitch every scene's audio segments into one continuous track and dump
# it to the WAV file that the later ffmpeg step muxes with the frames.
full_audio = np.concatenate(audio_parts)
write_wav(AUDIO_FILE, full_audio)
# Summary stats: video length is derived from the frame counter, audio
# length from the sample buffer — since every scene's audio was padded to
# its frame duration, the two should line up.
total_frames = frame_counter[0]
total_duration = total_frames / FPS
audio_duration = len(full_audio) / SAMPLE_RATE
print(f"Generated {total_frames} frames ({total_duration:.1f}s video)")
print(f"Audio duration: {audio_duration:.1f}s")
print("Ready for ffmpeg render.")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment