Skip to content

Instantly share code, notes, and snippets.

@ericboehs
Created March 1, 2026 21:41
Show Gist options
  • Select an option

  • Save ericboehs/78b60498946666599c02dddd672d362c to your computer and use it in GitHub Desktop.

Select an option

Save ericboehs/78b60498946666599c02dddd672d362c to your computer and use it in GitHub Desktop.
πŸ” chatgpt-history – CLI to access and sync ChatGPT conversation history via Safari
#!/usr/bin/env bash
#
# chatgpt-history - CLI to access ChatGPT conversation history via Safari
#
# Works by injecting JavaScript into an open, logged-in Safari ChatGPT tab to
# make authenticated backend API requests (no API key required).
#
# Requires:
# - Safari open with a chatgpt.com tab (logged in)
# - Safari > Develop > Allow JavaScript from Apple Events (enabled)
# - jq installed
#
# Usage:
# chatgpt-history list [--limit N] List conversations (default: 20)
# chatgpt-history show <id> Show conversation content
# chatgpt-history search <query> Search conversations by title
# chatgpt-history export <id> Export conversation to markdown
# chatgpt-history help Show this help
# Strict mode: exit on errors and unset variables; fail pipelines on any stage.
set -euo pipefail
# Colors (disabled when piping): only emit ANSI escapes when stdout is a TTY.
if [[ -t 1 ]]; then
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
BOLD='\033[1m'
NC='\033[0m'
else
RED='' GREEN='' YELLOW='' BLUE='' BOLD='' NC=''
fi
# Cache file for last list results (enables numeric index lookups)
CACHE_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/chatgpt-history"
CACHE_FILE="$CACHE_DIR/last-list.json"
# Request timeout in seconds (prevents hanging if Safari is unresponsive)
REQUEST_TIMEOUT="${CHATGPT_HISTORY_TIMEOUT:-30}"
# Sync defaults
SYNC_DIR="${CHATGPT_HISTORY_SYNC_DIR:-$HOME/Documents/ChatGPT-Conversations}"
SYNC_STATE_FILE="$CACHE_DIR/sync-state.json"
SYNC_DELAY=0.5 # seconds between API calls to avoid rate limiting
# Common jq filter for extracting messages from a conversation.
# Walks the conversation's .mapping node graph ordered by create_time, keeps
# only user/assistant messages that have .content.parts, and emits a list of
# {role, content} objects. Parts may be plain strings or objects carrying a
# .text field; any other part shape is dropped. The trailing gsub strips
# ChatGPT citation artifacts (markers of the form "turnNsearchM"/"turnNviewM",
# optionally wrapped in "cite" delimiter characters).
JQ_EXTRACT_MESSAGES='
[.mapping | to_entries | sort_by(.value.message.create_time // 0)[] |
select(.value.message.author.role == "user" or .value.message.author.role == "assistant") |
.value.message |
select(.content.parts) |
{
role: .author.role,
content: ([.content.parts[]? |
if type == "string" then .
elif .text then .text
else empty
end
] | join("\n") | gsub(".?(cite)?.?(turn[0-9]+(search|view)[0-9]+)+.?"; ""))
}
]
'
# Verify the Safari process is running; print guidance to stderr and return
# non-zero when it is not.
check_safari() {
  if pgrep -x "Safari" > /dev/null; then
    return 0
  fi
  echo -e "${RED}Error: Safari is not running${NC}" >&2
  echo -e "${YELLOW}Please open Safari and navigate to chatgpt.com${NC}" >&2
  return 1
}
# Make an authenticated request to the ChatGPT backend API via Safari.
#
# Finds the first Safari document on chatgpt.com / chat.openai.com, fetches a
# session access token via a synchronous XHR, then performs the requested GET
# with that bearer token. The raw response body is written to stdout.
# Failures are reported as a JSON object with an "error" key so that callers
# can detect them with `jq -e '.error'`.
#
# $1 - endpoint path, e.g. "/backend-api/conversations?offset=0&limit=20"
#
# NOTE(review): 'timeout' requires GNU coreutils on macOS — confirm it is
# installed, since stock macOS does not ship it.
chatgpt_request() {
local endpoint="$1"
# Check Safari is running first (it prints its own message on failure)
check_safari || return 1
local result
local exit_code=0
# BUGFIX: capture the status with '||' — under 'set -e' a bare failing
# assignment would abort the whole script here, making the timeout/error
# handling below unreachable dead code.
result=$(timeout "$REQUEST_TIMEOUT" osascript -e '
tell application "Safari"
set chatGPTTab to null
repeat with d in documents
if URL of d contains "chatgpt.com" or URL of d contains "chat.openai.com" then
set chatGPTTab to d
exit repeat
end if
end repeat
if chatGPTTab is not null then
do JavaScript "
try {
var xhr = new XMLHttpRequest();
xhr.open(\"GET\", \"/api/auth/session\", false);
xhr.send(null);
if (xhr.status !== 200) {
JSON.stringify({error: \"Session request failed: \" + xhr.status});
} else {
var session = JSON.parse(xhr.responseText);
if (!session.accessToken) {
JSON.stringify({error: \"Not logged in to ChatGPT\"});
} else {
var xhr2 = new XMLHttpRequest();
xhr2.open(\"GET\", \"'"$endpoint"'\", false);
xhr2.setRequestHeader(\"Authorization\", \"Bearer \" + session.accessToken);
xhr2.send(null);
xhr2.responseText;
}
}
} catch(e) {
JSON.stringify({error: \"JavaScript error: \" + e.message});
}
" in chatGPTTab
else
return "{\"error\": \"No ChatGPT tab found in Safari. Open chatgpt.com first.\"}"
end if
end tell
' 2>&1) || exit_code=$?
# Handle timeout (GNU timeout exits with 124 when the command times out).
# Return 0 so the error JSON reaches the caller's '.error' check instead of
# tripping 'set -e' in the caller's command substitution.
if [[ $exit_code -eq 124 ]]; then
echo '{"error": "Request timed out after '"$REQUEST_TIMEOUT"' seconds"}'
return 0
fi
# Handle other osascript errors (automation permission missing, etc.)
if [[ $exit_code -ne 0 ]]; then
echo '{"error": "Safari automation failed. Enable: Safari > Develop > Allow JavaScript from Apple Events"}'
return 0
fi
echo "$result"
}
# Abort with install instructions when the required jq binary is absent.
check_deps() {
  command -v jq &> /dev/null && return 0
  echo -e "${RED}Error: jq is required but not installed.${NC}" >&2
  echo "Install with: brew install jq" >&2
  exit 1
}
# Report (via exit status) whether the optional fzf binary is installed.
has_fzf() {
  command -v fzf > /dev/null 2>&1
}
# Translate a user-supplied conversation reference into a full UUID.
#
# Accepts three forms:
#   - a full UUID (returned unchanged, no cache needed)
#   - a 1-based numeric index into the cached 'list' output
#   - a UUID prefix, which must match exactly one cached conversation
#
# Prints the resolved UUID on stdout; returns 1 with a message on stderr when
# the reference cannot be resolved.
resolve_id() {
  local ref="$1"
  # Full UUIDs need no lookup — hand them straight back.
  if [[ "$ref" =~ ^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$ ]]; then
    echo "$ref"
    return 0
  fi
  # Index and prefix lookups both require the cache from a prior 'list' run.
  if [[ ! -f "$CACHE_FILE" ]]; then
    echo -e "${YELLOW}No cached list. Run 'chatgpt-history list' first, or use full UUID.${NC}" >&2
    return 1
  fi
  # Warn when the cache is older than an hour ('stat -f %m' is BSD/macOS).
  local now mtime age
  now=$(date +%s)
  mtime=$(stat -f %m "$CACHE_FILE")
  age=$((now - mtime))
  if (( age > 3600 )); then
    echo -e "${YELLOW}Note: List cache is $((age / 3600))h old. Run 'chatgpt-history list' to refresh.${NC}" >&2
  fi
  # Pure digits → 1-based index into the cached items array.
  if [[ "$ref" =~ ^[0-9]+$ ]]; then
    local resolved
    resolved=$(jq -r ".items[$((ref - 1))].id // empty" "$CACHE_FILE")
    if [[ -z "$resolved" ]]; then
      echo -e "${RED}Error: Index $ref out of range${NC}" >&2
      return 1
    fi
    echo "$resolved"
    return 0
  fi
  # Anything else is treated as a UUID prefix; it must be unambiguous.
  local candidates hits
  candidates=$(jq -r --arg prefix "$ref" '.items[] | select(.id | startswith($prefix)) | .id' "$CACHE_FILE")
  hits=$(echo "$candidates" | grep -c . || true)
  if [[ "$hits" -eq 0 ]]; then
    echo -e "${RED}Error: No conversation ID starting with '$ref'${NC}" >&2
    return 1
  fi
  if [[ "$hits" -gt 1 ]]; then
    echo -e "${RED}Error: Multiple matches for '$ref':${NC}" >&2
    echo "$candidates" | head -5 >&2
    return 1
  fi
  echo "$candidates"
  return 0
}
# List recent conversations, optionally filtered by creation date.
#
# Options:
#   --limit|-l N  number of conversations to fetch (default 20)
#   --verbose|-v  include full conversation UUIDs in the output
#   --today / --yesterday / --this-week / --this-month / --since DATE
#                 client-side create_time filters
#
# Side effect: writes the (possibly filtered) response to $CACHE_FILE so
# resolve_id can later translate numeric indexes and UUID prefixes.
# Unknown arguments are silently ignored.
cmd_list() {
local limit=20
local since_date=""
local date_filter=""
local verbose=false
while [[ $# -gt 0 ]]; do
case "$1" in
--limit|-l)
limit="$2"
shift 2
;;
--verbose|-v)
verbose=true
shift
;;
--today)
since_date=$(date +%Y-%m-%d)
shift
;;
# NOTE: 'date -v' offsets are BSD/macOS syntax (not GNU date).
--yesterday)
since_date=$(date -v-1d +%Y-%m-%d)
shift
;;
--this-week)
since_date=$(date -v-7d +%Y-%m-%d)
shift
;;
--this-month)
since_date=$(date -v-30d +%Y-%m-%d)
shift
;;
--since)
since_date="$2"
shift 2
;;
*)
shift
;;
esac
done
# Build date filter for jq (string comparison on ISO-8601 create_time)
if [[ -n "$since_date" ]]; then
date_filter="| select(.create_time >= \"${since_date}\")"
# Fetch more to filter client-side
limit=100
fi
local response
response=$(chatgpt_request "/backend-api/conversations?offset=0&limit=$limit")
if echo "$response" | jq -e '.error' &>/dev/null; then
echo -e "${RED}Error: $(echo "$response" | jq -r '.error')${NC}" >&2
echo -e "${YELLOW}Make sure Safari has a ChatGPT tab open and you're logged in.${NC}" >&2
exit 1
fi
# Cache results for partial ID matching (before filtering)
mkdir -p "$CACHE_DIR"
echo "$response" > "$CACHE_FILE"
# Apply date filter if specified
local filtered
if [[ -n "$date_filter" ]]; then
filtered=$(echo "$response" | jq -r ".items[] $date_filter" | jq -s '.')
# Update cache with filtered results for index lookups
# (so the numeric indexes printed below stay valid for show/export).
echo "{\"items\": $filtered}" > "$CACHE_FILE"
else
filtered=$(echo "$response" | jq '.items')
fi
local count
count=$(echo "$filtered" | jq 'length')
local has_more
has_more=$(echo "$response" | jq '.total > .limit')
# Format cache info
local cache_info="cached now"
if [[ -n "$since_date" ]]; then
echo -e "${BOLD}Conversations since $since_date ($count found, $cache_info):${NC}\n"
elif [[ "$has_more" == "true" ]]; then
echo -e "${BOLD}Conversations (showing $count, more available, $cache_info):${NC}\n"
else
echo -e "${BOLD}Conversations ($count total, $cache_info):${NC}\n"
fi
# idx lives in the pipeline's subshell — fine here, it is only used for
# printing the 1-based row number that matches the cached item order.
local idx=1
echo "$filtered" | jq -r '.[] | "\(.id)\t\(.create_time | split("T")[0])\t\(.title)"' | \
while IFS=$'\t' read -r id date title; do
if [[ "$verbose" == "true" ]]; then
printf "${YELLOW}%2d${NC} ${BLUE}%s${NC} ${GREEN}%s${NC} %s\n" "$idx" "$id" "$date" "$title"
else
printf "${YELLOW}%2d${NC} ${GREEN}%s${NC} %s\n" "$idx" "$date" "$title"
fi
((idx++))
done
}
# Show conversation content in one of three formats.
#
# Arguments: <id> plus options:
#   --last|-n N        only the last N messages
#   --format|-f FMT    markdown (default), plain, or json
#   --raw|-r           alias for --format plain
#
# The jq programs below are assembled by string-splicing $JQ_EXTRACT_MESSAGES
# and $last_filter into single-quoted filter text.
cmd_show() {
local last_n=""
local conv_id=""
local format="markdown" # markdown (default), plain, json
while [[ $# -gt 0 ]]; do
case "$1" in
--last|-n)
last_n="$2"
shift 2
;;
--raw|-r)
format="plain"
shift
;;
--format|-f)
format="$2"
shift 2
;;
*)
# First non-flag argument is the conversation reference.
if [[ -z "$conv_id" ]]; then
conv_id="$1"
fi
shift
;;
esac
done
if [[ -z "$conv_id" ]]; then
echo -e "${RED}Error: Missing conversation ID${NC}" >&2
echo "Usage: chatgpt-history show <id> [--last N] [--format markdown|plain|json]" >&2
exit 1
fi
# Validate format
case "$format" in
markdown|plain|json) ;;
*)
echo -e "${RED}Error: Invalid format '$format'. Use: markdown, plain, json${NC}" >&2
exit 1
;;
esac
# Resolve partial ID or index
conv_id=$(resolve_id "$conv_id") || exit 1
local response
response=$(chatgpt_request "/backend-api/conversation/$conv_id")
# Backend errors for a single conversation arrive as a .detail field.
if echo "$response" | jq -e '.detail' &>/dev/null; then
echo -e "${RED}Error: $(echo "$response" | jq -r '.detail')${NC}" >&2
exit 1
fi
# Build jq filter based on --last flag (negative slice keeps the tail)
local last_filter=""
if [[ -n "$last_n" ]]; then
last_filter="| .[-${last_n}:]"
fi
case "$format" in
json)
# JSON format: structured output
echo "$response" | jq '
{
id: .conversation_id,
title: .title,
created: .create_time,
messages: ('"$JQ_EXTRACT_MESSAGES"')'"$last_filter"'
}
'
;;
plain)
# Plain mode: simple text, ideal for piping to Claude
echo "$response" | jq -r '
.title as $title |
"# \($title)\n",
(('"$JQ_EXTRACT_MESSAGES"')'"$last_filter"'[] |
"\(.role | ascii_upcase): \(.content)\n"
)
'
;;
markdown)
# Markdown mode with visual separators
echo "$response" | jq -r '
.title as $title |
.create_time as $created |
"# \($title)\n_Created: \($created)_\n",
(('"$JQ_EXTRACT_MESSAGES"')'"$last_filter"'[] |
(if .role == "user" then
"\n&#8203;\n\n─────────────────────────────────────────────────────────── **You** ───\n\n"
else
"\n&#8203;\n\n── **ChatGPT** ────────────────────────────────────────────────────────\n\n"
end) + .content
)
'
;;
esac
}
# Search conversations (uses ChatGPT's server-side search)
#
# Arguments: <query> plus options:
#   --all|-a   follow the server's pagination cursor until exhausted
#
# Without --all only the first page is fetched; a trailing cursor means more
# results exist and a hint is appended to the header.
cmd_search() {
local query=""
local fetch_all=false
while [[ $# -gt 0 ]]; do
case "$1" in
--all|-a)
fetch_all=true
shift
;;
*)
if [[ -z "$query" ]]; then
query="$1"
fi
shift
;;
esac
done
if [[ -z "$query" ]]; then
echo -e "${RED}Error: Missing search query${NC}" >&2
echo "Usage: chatgpt-history search <query> [--all]" >&2
exit 1
fi
local all_items="[]"
local cursor=""
local page=1
while true; do
local endpoint
# jq's @uri percent-encodes the raw query for the URL.
endpoint="/backend-api/conversations/search?query=$(printf '%s' "$query" | jq -sRr @uri)"
if [[ -n "$cursor" ]]; then
endpoint="${endpoint}&cursor=${cursor}"
fi
local response
response=$(chatgpt_request "$endpoint")
if echo "$response" | jq -e '.error' &>/dev/null; then
echo -e "${RED}Error: $(echo "$response" | jq -r '.error')${NC}" >&2
exit 1
fi
# Merge items
local new_items
new_items=$(echo "$response" | jq '.items // []')
all_items=$(echo "$all_items $new_items" | jq -s 'add')
# Check for more pages
cursor=$(echo "$response" | jq -r '.cursor // empty')
if [[ -z "$cursor" ]] || [[ "$fetch_all" != "true" ]]; then
break
fi
((page++))
echo -ne "\r${YELLOW}Fetching page $page...${NC}" >&2
done
# Clear progress if we paginated
if [[ $page -gt 1 ]]; then
echo -ne "\r\033[K" >&2
fi
local count
count=$(echo "$all_items" | jq 'length')
if [[ "$count" -eq 0 ]]; then
echo -e "${YELLOW}No conversations matching '$query'${NC}"
exit 0
fi
local more_available=""
if [[ -n "$cursor" ]] && [[ "$fetch_all" != "true" ]]; then
more_available=" (use --all for more)"
fi
echo -e "${BOLD}Search results for '$query' ($count found${more_available}):${NC}\n"
# Search results carry .conversation_id (not .id) and a numeric update_time.
echo "$all_items" | jq -r '.[] | "\(.conversation_id)\t\(.update_time | todate | split("T")[0])\t\(.title)\t\(.payload.snippet // "")"' | \
while IFS=$'\t' read -r id date title snippet; do
echo -e "${BLUE}$id${NC} ${GREEN}$date${NC} $title"
if [[ -n "$snippet" ]]; then
# Truncate and clean up snippet for display
local short_snippet
short_snippet=$(echo "$snippet" | head -c 120 | tr '\n' ' ')
echo -e "   ${YELLOW}...${short_snippet}...${NC}"
fi
done
}
# Export conversation to markdown
#
# Arguments: <id> [output_file]. When no output file is given a filename is
# derived from the slugified title (truncated to 50 chars) in the current
# directory.
cmd_export() {
if [[ $# -lt 1 ]]; then
echo -e "${RED}Error: Missing conversation ID${NC}" >&2
echo "Usage: chatgpt-history export <id> [output_file]" >&2
exit 1
fi
local conv_id="$1"
local output_file="${2:-}"
# Resolve partial ID or index
conv_id=$(resolve_id "$conv_id") || exit 1
local response
response=$(chatgpt_request "/backend-api/conversation/$conv_id")
if echo "$response" | jq -e '.detail' &>/dev/null; then
echo -e "${RED}Error: $(echo "$response" | jq -r '.detail')${NC}" >&2
exit 1
fi
local markdown
# $JQ_EXTRACT_MESSAGES is spliced into the single-quoted jq program.
markdown=$(echo "$response" | jq -r '
.title as $title |
.create_time as $created |
"# \($title)\n\n_Exported from ChatGPT on \(now | strftime("%Y-%m-%d"))_\n_Originally created: \($created)_\n",
(('"$JQ_EXTRACT_MESSAGES"')[] |
"\n---\n\n### \(.role | if . == "user" then "You" else "ChatGPT" end)\n\n\(.content)"
)
')
if [[ -n "$output_file" ]]; then
echo "$markdown" > "$output_file"
echo -e "${GREEN}Exported to: $output_file${NC}"
else
# Generate filename from title (cleaner jq-based sanitization)
local title
title=$(echo "$response" | jq -r '.title | ascii_downcase | gsub("[^a-z0-9]+"; "-") | gsub("^-|-$"; "")')
local filename="${title:0:50}.md"
echo "$markdown" > "$filename"
echo -e "${GREEN}Exported to: $filename${NC}"
fi
}
# Resolve the given conversation reference and open it on chatgpt.com in the
# default browser.
cmd_open() {
  if [[ $# -lt 1 ]]; then
    echo -e "${RED}Error: Missing conversation ID${NC}" >&2
    echo "Usage: chatgpt-history open <id>" >&2
    exit 1
  fi
  # Resolve partial ID or index into a full UUID first.
  local conv_id
  conv_id=$(resolve_id "$1") || exit 1
  local url="https://chatgpt.com/c/$conv_id"
  echo -e "${GREEN}Opening: $url${NC}"
  open "$url"
}
# Interactive picker using fzf
#
# Options:
#   --export|-e    export the selected conversation instead of showing it
#   --limit|-l N   number of conversations to offer (default 50)
#
# The fzf preview re-invokes this script ($0) to render the highlighted
# conversation's last messages.
cmd_pick() {
if ! has_fzf; then
echo -e "${RED}Error: fzf is required for pick command${NC}" >&2
echo "Install with: brew install fzf" >&2
exit 1
fi
local action="show"
local limit=50
while [[ $# -gt 0 ]]; do
case "$1" in
--export|-e)
action="export"
shift
;;
--limit|-l)
limit="$2"
shift 2
;;
*)
shift
;;
esac
done
local response
response=$(chatgpt_request "/backend-api/conversations?offset=0&limit=$limit")
if echo "$response" | jq -e '.error' &>/dev/null; then
echo -e "${RED}Error: $(echo "$response" | jq -r '.error')${NC}" >&2
exit 1
fi
# Cache for potential follow-up commands
mkdir -p "$CACHE_DIR"
echo "$response" > "$CACHE_FILE"
local selection
# NOTE(review): if the user cancels fzf (exit 130), 'set -e' aborts the
# script on this assignment before the empty-selection check below runs —
# confirm whether a '|| true' guard is intended here.
selection=$(echo "$response" | jq -r '.items[] | "\(.id)\t\(.create_time | split("T")[0])\t\(.title)"' | \
fzf --with-nth=2,3 --delimiter='\t' \
--preview="$0 show {1} --last 4 2>/dev/null | head -30" \
--preview-window=right:50%:wrap \
--header="Select conversation (${action})")
if [[ -z "$selection" ]]; then
exit 0
fi
local conv_id
conv_id=$(echo "$selection" | cut -f1)
case "$action" in
show)
cmd_show "$conv_id"
;;
export)
cmd_export "$conv_id"
;;
esac
}
# Sync conversations to local markdown files
cmd_sync() {
local sync_dir="$SYNC_DIR"
local limit=""
local since_date=""
local dry_run=false
local reindex=false
local force=false
while [[ $# -gt 0 ]]; do
case "$1" in
--dir|-d)
sync_dir="$2"
shift 2
;;
--limit|-l)
limit="$2"
shift 2
;;
--since|-s)
since_date="$2"
shift 2
;;
--dry-run|-n)
dry_run=true
shift
;;
--reindex|-r)
reindex=true
shift
;;
--force|-f)
force=true
shift
;;
*)
shift
;;
esac
done
mkdir -p "$sync_dir"
mkdir -p "$CACHE_DIR"
# Load sync state (maps full UUID -> {update_time, filename})
local sync_state="{}"
if [[ -f "$SYNC_STATE_FILE" ]] && [[ "$force" != "true" ]]; then
sync_state=$(cat "$SYNC_STATE_FILE")
fi
# Scan existing files so we can detect already-exported conversations on first run
local existing_files="{}"
if [[ "$force" != "true" ]]; then
existing_files=$(sync_existing_files "$sync_dir")
fi
# Paginate through all conversations
local offset=0
local page_size=28
local all_conversations="[]"
echo -e "${BOLD}Fetching conversation list...${NC}" >&2
while true; do
local endpoint="/backend-api/conversations?offset=$offset&limit=$page_size"
local response
response=$(chatgpt_request "$endpoint")
if echo "$response" | jq -e '.error' &>/dev/null; then
echo -e "${RED}Error: $(echo "$response" | jq -r '.error')${NC}" >&2
exit 1
fi
local page_items
page_items=$(echo "$response" | jq '.items // []')
local page_count
page_count=$(echo "$page_items" | jq 'length')
if [[ "$page_count" -eq 0 ]]; then
break
fi
all_conversations=$(echo "$all_conversations $page_items" | jq -s 'add')
offset=$((offset + page_count))
local total
total=$(echo "$response" | jq '.total // 0')
echo -ne "\r${YELLOW}Fetched $offset / $total conversations...${NC}" >&2
# Stop if we've hit the user-specified limit
if [[ -n "$limit" ]] && [[ "$offset" -ge "$limit" ]]; then
all_conversations=$(echo "$all_conversations" | jq ".[:$limit]")
break
fi
# Stop if we've got them all
if [[ "$offset" -ge "$total" ]]; then
break
fi
sleep "$SYNC_DELAY"
done
echo -ne "\r\033[K" >&2
local total_count
total_count=$(echo "$all_conversations" | jq 'length')
echo -e "${BOLD}Found $total_count conversations${NC}" >&2
# Filter by --since if specified
if [[ -n "$since_date" ]]; then
all_conversations=$(echo "$all_conversations" | jq --arg since "$since_date" \
'[.[] | select(.create_time >= $since)]')
local filtered_count
filtered_count=$(echo "$all_conversations" | jq 'length')
echo -e "${YELLOW}Filtered to $filtered_count conversations since $since_date${NC}" >&2
fi
# Write sync state to temp file so updates persist across iterations
local tmp_state
tmp_state=$(mktemp)
echo "$sync_state" > "$tmp_state"
trap 'rm -f "'"$tmp_state"'"' RETURN
# Process conversations
local conv_count
conv_count=$(echo "$all_conversations" | jq 'length')
local new_count=0
local updated_count=0
local skipped_count=0
local error_count=0
for i in $(seq 0 $((conv_count - 1))); do
local conv
conv=$(echo "$all_conversations" | jq -c ".[$i]")
local conv_id
conv_id=$(echo "$conv" | jq -r '.id')
local update_time
update_time=$(echo "$conv" | jq -r '.update_time')
local title
title=$(echo "$conv" | jq -r '.title // "Untitled"')
# Check sync state β€” skip if update_time matches
local stored_update_time
stored_update_time=$(jq -r --arg id "$conv_id" '.[$id].update_time // ""' "$tmp_state")
if [[ "$stored_update_time" == "$update_time" ]] && [[ "$force" != "true" ]]; then
((skipped_count++)) || true
continue
fi
# Determine if this is new or an update
local action="new"
local short_id="${conv_id:0:8}"
if [[ -n "$stored_update_time" ]]; then
action="updated"
elif echo "$existing_files" | jq -e --arg sid "$short_id" '.[$sid]' &>/dev/null; then
action="updated"
fi
# Build filename: YYYY-MM-DD_<slug>_<8-char-uuid>.md
local create_date
create_date=$(echo "$conv" | jq -r '.create_time | split("T")[0]')
local slug
slug=$(echo "$title" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g; s/--*/-/g; s/^-//; s/-$//' | cut -c1-50)
local filename="${create_date}_${slug}_${short_id}.md"
if [[ "$dry_run" == "true" ]]; then
echo -e " ${GREEN}[$action]${NC} $filename β€” $title"
continue
fi
# Fetch full conversation
sleep "$SYNC_DELAY"
local full_response
full_response=$(chatgpt_request "/backend-api/conversation/$conv_id")
if echo "$full_response" | jq -e '.detail' &>/dev/null; then
echo -e " ${RED}[error]${NC} $title β€” $(echo "$full_response" | jq -r '.detail')" >&2
((error_count++)) || true
continue
fi
# Remove old file if filename changed (title or date updated)
local old_filename
old_filename=$(jq -r --arg id "$conv_id" '.[$id].filename // ""' "$tmp_state")
if [[ -z "$old_filename" ]]; then
# Check existing files by short ID
old_filename=$(echo "$existing_files" | jq -r --arg sid "$short_id" '.[$sid] // ""')
fi
if [[ -n "$old_filename" ]] && [[ "$old_filename" != "$filename" ]] && [[ -f "$sync_dir/$old_filename" ]]; then
rm "$sync_dir/$old_filename"
fi
# Export to markdown
local markdown
markdown=$(echo "$full_response" | jq -r '
.title as $title |
.conversation_id as $conv_id |
(.create_time | if type == "number" then todate else . end) as $created |
"# \($title)\n\n_ChatGPT conversation \($conv_id)_\n_Created: \($created)_\n",
(('"$JQ_EXTRACT_MESSAGES"')[] |
"\n---\n\n### \(.role | if . == "user" then "You" else "ChatGPT" end)\n\n\(.content)"
)
')
echo "$markdown" > "$sync_dir/$filename"
# Update sync state on disk
local updated_state
updated_state=$(jq --arg id "$conv_id" --arg ut "$update_time" --arg fn "$filename" \
'.[$id] = {update_time: $ut, filename: $fn}' "$tmp_state")
echo "$updated_state" > "$tmp_state"
if [[ "$action" == "new" ]]; then
((new_count++)) || true
echo -e " ${GREEN}[new]${NC} $filename"
else
((updated_count++)) || true
echo -e " ${BLUE}[updated]${NC} $filename"
fi
done
# Persist sync state
if [[ "$dry_run" != "true" ]]; then
cp "$tmp_state" "$SYNC_STATE_FILE"
fi
echo ""
if [[ "$dry_run" == "true" ]]; then
echo -e "${BOLD}Dry run complete.${NC} No files written."
else
echo -e "${BOLD}Sync complete:${NC} $new_count new, $updated_count updated, $skipped_count unchanged, $error_count errors"
fi
# Reindex if requested
if [[ "$reindex" == "true" ]] && [[ "$dry_run" != "true" ]]; then
echo -e "${YELLOW}Reindexing qmd collection...${NC}"
if command -v qmd &>/dev/null; then
qmd update -c chatgpt 2>/dev/null || echo -e "${YELLOW}Note: qmd collection 'chatgpt' not found. Create it with:${NC}\n qmd collection add '$sync_dir' --name chatgpt --mask '**/*.md'"
else
echo -e "${YELLOW}qmd not found β€” skipping reindex${NC}"
fi
fi
}
# Scan a sync directory for previously exported conversations.
# Emits a JSON object mapping each 8-char short ID to its filename, matching
# files named *_<8 lowercase hex chars>.md.
sync_existing_files() {
  local dir="$1"
  local mapping="{}"
  local path name
  for path in "$dir"/*_????????.md; do
    [[ -f "$path" ]] || continue
    name=${path##*/}
    # Pull the short ID out of the "..._xxxxxxxx.md" suffix.
    if [[ "$name" =~ _([0-9a-f]{8})\.md$ ]]; then
      mapping=$(echo "$mapping" | jq --arg id "${BASH_REMATCH[1]}" --arg fn "$name" \
        '.[$id] = $fn')
    fi
  done
  echo "$mapping"
}
# Show help
# The quoted 'EOF' delimiter keeps the help text literal (no expansion).
cmd_help() {
cat << 'EOF'
chatgpt-history - CLI to access ChatGPT conversation history via Safari
USAGE:
chatgpt-history <command> [options]
COMMANDS:
list [options] List conversations (default: 20)
--limit N Number of conversations to fetch
--verbose, -v Show full UUIDs
--today Only today's conversations
--yesterday Since yesterday
--this-week Last 7 days
--this-month Last 30 days
--since YYYY-MM-DD Since specific date
show <id> [options] Show conversation content
--last N Only last N messages
--format FORMAT Output format: markdown (default), plain, json
--raw Alias for --format plain
search <query> [options] Search conversations (titles and content)
--all Fetch all results (default: first 30)
pick [options] Interactive fzf picker (requires fzf)
--export Export selected instead of show
open <id> Open conversation in browser
export <id> [file] Export conversation to markdown file
sync [options] Sync all conversations to local markdown
--dir PATH Output directory (default: ~/Documents/ChatGPT-Conversations)
--limit N Max conversations to sync
--since YYYY-MM-DD Only sync conversations since date
--dry-run, -n Show what would be synced without writing
--reindex, -r Run qmd update after sync
--force, -f Re-export all, ignoring sync state
help Show this help message
ID SHORTCUTS:
After running 'list', you can use:
- Numeric index: chatgpt-history show 1 (first in list)
- Partial UUID: chatgpt-history show 6959 (prefix match)
- Full UUID: chatgpt-history show 6959xxxx-xxxx-...
EXAMPLES:
chatgpt-history list --today
chatgpt-history list --since 2026-01-01
chatgpt-history show 1 --last 5
chatgpt-history show 1 --raw | pbcopy # copy for Claude
chatgpt-history show 1 --format json # structured JSON output
chatgpt-history search "python"
chatgpt-history search "API key" --all # fetch all results
chatgpt-history pick
chatgpt-history pick --export
chatgpt-history open 1 # open in browser
chatgpt-history export 1 my-chat.md
chatgpt-history sync # sync all conversations
chatgpt-history sync --dry-run # preview what would sync
chatgpt-history sync --since 2026-01-01 # only recent conversations
chatgpt-history sync --reindex # sync + update qmd index
chatgpt-history sync --force # re-export everything
ENVIRONMENT:
CHATGPT_HISTORY_SYNC_DIR Sync directory (default: ~/Documents/ChatGPT-Conversations)
CHATGPT_HISTORY_TIMEOUT Request timeout in seconds (default: 30)
REQUIREMENTS:
- Safari must be open with a chatgpt.com tab (logged in)
- Safari > Develop > Allow JavaScript from Apple Events must be enabled
- jq must be installed (brew install jq)
- fzf (optional, for pick command)
NOTE:
This tool works by injecting JavaScript into your Safari ChatGPT tab
to make authenticated API requests. All requests happen within the
browser context, so your session cookies and auth are preserved.
EOF
}
# Entry point: verify dependencies, then dispatch the first CLI argument
# (default: "list") to its command handler, passing the rest through.
main() {
  check_deps
  local cmd="${1:-list}"
  shift || true
  case "$cmd" in
    list|ls)          cmd_list "$@" ;;
    show|view|get)    cmd_show "$@" ;;
    search|find|grep) cmd_search "$@" ;;
    pick|fzf)         cmd_pick "$@" ;;
    open)             cmd_open "$@" ;;
    export|save)      cmd_export "$@" ;;
    sync)             cmd_sync "$@" ;;
    help|--help|-h)   cmd_help ;;
    *)
      echo -e "${RED}Unknown command: $cmd${NC}" >&2
      echo "Run 'chatgpt-history help' for usage." >&2
      exit 1
      ;;
  esac
}
main "$@"
@ericboehs
Copy link
Author

ericboehs commented Mar 1, 2026

πŸ” chatgpt-history

CLI tool to access and sync your ChatGPT conversation history via Safari browser automation.

How It Works

Injects JavaScript into your Safari ChatGPT tab to make authenticated API requests against ChatGPT's backend. No API keys needed β€” it uses your existing browser session.

Features

  • List conversations with date filtering (--today, --since, --this-week)
  • Show conversation content in markdown, plain text, or JSON
  • Search conversations by title and content (server-side)
  • Export individual conversations to markdown files
  • Sync all conversations to local markdown files with incremental updates
  • Pick conversations interactively with fzf preview
  • Open conversations in the browser
  • Numeric index shortcuts after list (e.g., show 1) and partial UUID matching

Installation

curl -fsSL https://gist.githubusercontent.com/ericboehs/78b60498946666599c02dddd672d362c/raw/chatgpt-history -o ~/bin/chatgpt-history
chmod +x ~/bin/chatgpt-history

Dependencies

  • Safari with a chatgpt.com tab (logged in)
  • Safari > Develop > Allow JavaScript from Apple Events (enabled)
  • jq (brew install jq)
  • fzf (optional, for pick command)

Usage

# List recent conversations
chatgpt-history list
chatgpt-history list --today
chatgpt-history list --since 2026-01-01

# Show a conversation (by index, partial UUID, or full UUID)
chatgpt-history show 1
chatgpt-history show 1 --last 5
chatgpt-history show 1 --raw | pbcopy    # copy for Claude

# Search
chatgpt-history search "python"
chatgpt-history search "API" --all

# Interactive picker
chatgpt-history pick

# Export single conversation
chatgpt-history export 1 my-chat.md

# Sync all conversations to local markdown
chatgpt-history sync                      # full incremental sync
chatgpt-history sync --dry-run            # preview without writing
chatgpt-history sync --since 2026-01-01   # only recent conversations
chatgpt-history sync --force              # re-export everything
chatgpt-history sync --reindex            # sync + update qmd index

Sync Details

The sync command exports all conversations as markdown files to ~/Documents/ChatGPT-Conversations/ (configurable via CHATGPT_HISTORY_SYNC_DIR).

File naming: YYYY-MM-DD_<title-slug>_<8-char-uuid>.md

  • Date from conversation creation time
  • Slugified title for human readability
  • 8-character UUID prefix for uniqueness (stable across title changes)

Incremental sync: Tracks each conversation's update_time in ~/.cache/chatgpt-history/sync-state.json. Only fetches conversations that have changed since last sync.

Designed for qmd indexing:

qmd collection add ~/Documents/ChatGPT-Conversations --name chatgpt --mask "**/*.md"

Configuration

Environment Variable Default Description
CHATGPT_HISTORY_TIMEOUT 30 Request timeout in seconds
CHATGPT_HISTORY_SYNC_DIR ~/Documents/ChatGPT-Conversations Sync output directory

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment