Created
March 8, 2026 03:34
-
-
Save Stwissel/f203b3703043181bd174935660c2546b to your computer and use it in GitHub Desktop.
Generate a cover image based on a markdown blog entry
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/usr/bin/env node | |
| /** | |
| * generate-cover.mjs | |
| * | |
| * Generates a cover image for a markdown blog post using: | |
| * 1. Anthropic API – crafts an optimal DALL·E image prompt from the post | |
| * 2. OpenAI DALL·E 3 – generates the image | |
| * | |
| * Output: <same-name-as-input>.webp alongside the markdown file | |
| * | |
| * Usage: | |
| * node --env-file=.env generate-cover.mjs path/to/post.md | |
| * | |
| * Reads API keys from a .env file in the current working directory: | |
| * ANTHROPIC_API_KEY=sk-ant-... | |
| * OPENAI_API_KEY=sk-... | |
| * | |
| * Dependencies (install once): | |
| * npm install @anthropic-ai/sdk openai sharp | |
| * | |
| * Requires Node.js 20.6+ (uses built-in --env-file flag) | |
| */ | |
| import fs from 'fs/promises'; | |
| import path from 'path'; | |
| import Anthropic from '@anthropic-ai/sdk'; | |
| import OpenAI from 'openai'; | |
| import sharp from 'sharp'; | |
// ── Config ────────────────────────────────────────────────────────────────────
// Passed straight through to openai.images.generate() below.
const IMAGE_SIZE = '1792x1024'; // DALL·E 3 wide-format (good for blog covers)
const IMAGE_QUALITY = 'standard'; // "standard" | "hd" (hd doubles the cost)
| // ── Helpers ─────────────────────────────────────────────────────────────────── | |
/** Print an error message to stderr and abort the process with exit code 1. */
function die(message) {
  console.error(`\nError: ${message}\n`);
  process.exit(1);
}
/**
 * Read a markdown file as UTF-8 text.
 * Exits the process (via die) when the file cannot be read.
 * @param {string} filePath - Path to the markdown file.
 * @returns {Promise<string>} The file contents.
 */
async function readMarkdown(filePath) {
  try {
    return await fs.readFile(filePath, 'utf8');
  } catch (err) {
    // Include the underlying reason (ENOENT vs EACCES vs EISDIR, …)
    // instead of swallowing it — the bare path alone hides the cause.
    die(`Cannot read file: ${filePath} (${err.message})`);
  }
}
/**
 * Ask Claude to write a concise, vivid DALL·E prompt for this post.
 * @param {Anthropic} anthropic - Initialized Anthropic client.
 * @param {string} markdownContent - Raw markdown of the blog post.
 * @returns {Promise<string>} The generated image prompt, trimmed.
 * @throws {Error} When the response contains no text content block.
 */
async function buildImagePrompt(anthropic, markdownContent) {
  console.log('→ Generating image prompt via Claude…');
  const message = await anthropic.messages.create({
    model: 'claude-opus-4-5',
    max_tokens: 300,
    messages: [
      {
        role: 'user',
        content: `You are an expert at writing prompts for DALL·E 3 image generation.
Read the blog post below and write a single, vivid image prompt (max 200 words) that:
- Captures the post's core theme or mood
- Works as an eye-catching blog cover image
- for the style cycle through the following styles at random:
- Photorealistic
- Tasteful illustrative
- Cyberpunk
- Vintage
- Minimalistic
- Comic book
- Contains NO text, letters, or words in the image
- Avoids clichés (no light bulbs, no handshakes, no generic office scenes)
Reply with ONLY the prompt — no preamble, no explanation.
--- BEGIN POST ---
${markdownContent.slice(0, 4000)}
--- END POST ---`
      }
    ]
  });
  // content[0] is not guaranteed to be a text block (e.g. when extended
  // thinking is enabled the first block is a "thinking" block) — find the
  // text block explicitly instead of assuming the first position.
  const textBlock = message.content.find((block) => block.type === 'text');
  if (!textBlock) throw new Error('Claude response contained no text content.');
  return textBlock.text.trim();
}
/**
 * Call DALL·E 3 and return the image as a Buffer.
 * @param {OpenAI} openai - Initialized OpenAI client.
 * @param {string} prompt - The image-generation prompt.
 * @returns {Promise<Buffer>} PNG bytes decoded from the base64 response.
 * @throws {Error} When the API response contains no image data.
 */
async function generateImage(openai, prompt) {
  console.log('→ Generating image via DALL·E 3…');
  console.log(`  Prompt: ${prompt}\n`);
  const response = await openai.images.generate({
    model: 'dall-e-3',
    prompt,
    n: 1,
    size: IMAGE_SIZE,
    quality: IMAGE_QUALITY,
    response_format: 'b64_json'
  });
  // Guard against an empty/missing data array — Buffer.from(undefined, …)
  // would otherwise throw an opaque TypeError far from the real cause.
  const b64 = response.data?.[0]?.b64_json;
  if (!b64) throw new Error('DALL·E 3 response contained no image data.');
  return Buffer.from(b64, 'base64');
}
/** Convert a PNG buffer to WebP and write it beside the markdown file. */
async function saveWebP(pngBuffer, mdFilePath) {
  const { dir, name } = path.parse(mdFilePath);
  const imagePath = path.join(dir, `${name}.webp`);
  await sharp(pngBuffer).webp({ quality: 85 }).toFile(imagePath);
  return imagePath;
}
/** Persist the generated image prompt as <name>.prompt beside the markdown file. */
async function savePrompt(prompt, mdFilePath) {
  const { dir, name } = path.parse(mdFilePath);
  const promptPath = path.join(dir, `${name}.prompt`);
  await fs.writeFile(promptPath, `${prompt}\n`, 'utf8');
  return promptPath;
}
/**
 * Add a `heroImage:` entry to the markdown file's YAML frontmatter, pointing
 * at the cover image (path after "documents/", without extension).
 * No-op when heroImage already exists, when the file path does not contain
 * "documents/", or when no frontmatter block opens the file.
 * @param {string} mdFilePath - Path to the markdown file (rewritten in place).
 */
async function addHeroImage(mdFilePath) {
  const content = await fs.readFile(mdFilePath, 'utf8');
  // Already has heroImage — nothing to do
  if (/^heroImage:/m.test(content)) return;
  // Derive relative path: everything after "documents/" without extension
  const match = mdFilePath.match(/documents\/(.+)\.[^.]+$/);
  if (!match) return;
  const heroValue = match[1];
  // Frontmatter must open the file; otherwise a stray "---" elsewhere
  // (e.g. a horizontal rule in the body) would be mistaken for the opening
  // fence and heroImage would be spliced into the document body.
  if (!content.startsWith('---')) return;
  // Insert heroImage before the closing --- of the frontmatter block
  const closingIdx = content.indexOf('\n---', 3);
  if (closingIdx === -1) return;
  const updated = content.slice(0, closingIdx) + `\nheroImage: ${heroValue}` + content.slice(closingIdx);
  await fs.writeFile(mdFilePath, updated, 'utf8');
  console.log(`✓ Added heroImage: ${heroValue}`);
}
| // ── Main ────────────────────────────────────────────────────────────────────── | |
/**
 * Entry point: read the post, craft a prompt, generate the cover image,
 * save image + prompt, and patch the frontmatter.
 */
async function main() {
  const inputFile = process.argv[2];
  if (!inputFile) die('Usage: node generate-cover.mjs <path/to/post.md>');
  const mdPath = path.resolve(inputFile);

  const { ANTHROPIC_API_KEY, OPENAI_API_KEY } = process.env;
  if (!ANTHROPIC_API_KEY) die('ANTHROPIC_API_KEY is not set.');
  if (!OPENAI_API_KEY) die('OPENAI_API_KEY is not set.');

  const anthropic = new Anthropic({ apiKey: ANTHROPIC_API_KEY });
  const openai = new OpenAI({ apiKey: OPENAI_API_KEY });

  const markdown = await readMarkdown(mdPath);
  const prompt = await buildImagePrompt(anthropic, markdown);
  const pngBuffer = await generateImage(openai, prompt);

  const imagePath = await saveWebP(pngBuffer, mdPath);
  const promptPath = await savePrompt(prompt, mdPath);
  await addHeroImage(mdPath);

  console.log(`✓ Cover image saved: ${imagePath}`);
  console.log(`✓ Prompt saved: ${promptPath}`);
}
// Run the script and surface any failure with a non-zero exit code.
try {
  await main();
} catch (err) {
  console.error(err);
  process.exit(1);
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment