Skip to content

Instantly share code, notes, and snippets.

@jhkchan
Created February 18, 2026 20:42
Show Gist options
  • Select an option

  • Save jhkchan/bce5b51419600f5dbb85379cff5ad37e to your computer and use it in GitHub Desktop.

Select an option

Save jhkchan/bce5b51419600f5dbb85379cff5ad37e to your computer and use it in GitHub Desktop.
Cursor Cleanup Script
#!/bin/bash
# Cursor Cleanup Script
# Removes histories of non-existent projects/workspaces and compacts the database
#
# What this does:
# 1. Backs up state.vscdb before any changes
# 2. Removes non-existent paths from the recently opened list
# 3. Removes workspace storage directories for non-existent projects
# 4. Removes orphan workspace storage directories (no workspace.json or no folder)
# 5. Cleans up stale inline diff data in cursorDiskKV
# 6. Cleans up file edit history for non-existent paths
# 7. VACUUMs (compacts) the global state.vscdb and removes the stale backup
#
# IMPORTANT: Close Cursor completely before running this script!
set -euo pipefail
CURSOR_DIR="$HOME/Library/Application Support/Cursor"
USER_DIR="$CURSOR_DIR/User"
GLOBAL_STORAGE="$USER_DIR/globalStorage"
WS_STORAGE="$USER_DIR/workspaceStorage"
HISTORY_DIR="$USER_DIR/History"
STATE_DB="$GLOBAL_STORAGE/state.vscdb"
STATE_DB_BACKUP="$GLOBAL_STORAGE/state.vscdb.backup"
BACKUP_DIR="$HOME/cursor-backup-$(date +%Y%m%d-%H%M%S)"
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
log() { echo -e "${GREEN}[OK]${NC} $1"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
err() { echo -e "${RED}[ERROR]${NC} $1"; }
# Refuse to run while Cursor is alive: mutating state.vscdb underneath a
# live process risks database corruption.
cursor_proc_pattern="Cursor.app/Contents/MacOS/Cursor"
if pgrep -f "$cursor_proc_pattern" >/dev/null 2>&1; then
  err "Cursor is still running! Please close Cursor completely first."
  err "You can force quit with: pkill -f 'Cursor.app/Contents/MacOS/Cursor'"
  exit 1
fi
echo "========================================="
echo " Cursor Cleanup Script"
echo "========================================="
echo ""
# Show current sizes so the final summary has a baseline to compare against.
echo "Current sizes:"
echo " Global state.vscdb: $(du -sh "$STATE_DB" 2>/dev/null | cut -f1)"
echo " Global state.vscdb.backup: $(du -sh "$STATE_DB_BACKUP" 2>/dev/null | cut -f1)"
echo " Workspace storage: $(du -sh "$WS_STORAGE" 2>/dev/null | cut -f1)"
echo " File history: $(du -sh "$HISTORY_DIR" 2>/dev/null | cut -f1)"
echo " Total Cursor dir: $(du -sh "$CURSOR_DIR" 2>/dev/null | cut -f1)"
echo ""
# Step 1: Backup the global database before touching anything.
echo "Step 1: Creating backup..."
# Fail with a clear diagnostic instead of letting 'cp' abort the script
# under set -e when the database is missing (fresh install / wrong machine).
if [ ! -f "$STATE_DB" ]; then
  err "state.vscdb not found at $STATE_DB - is Cursor installed?"
  exit 1
fi
mkdir -p "$BACKUP_DIR"
cp "$STATE_DB" "$BACKUP_DIR/state.vscdb"
log "Backed up state.vscdb to $BACKUP_DIR/state.vscdb"
echo ""
# Step 2: Drop entries whose path no longer exists from the "recently
# opened" list stored in the global state database.
# NOTE: the embedded Python's indentation was lost in the pasted source;
# it is restored here (the structure is unambiguous).
echo "Step 2: Cleaning recently opened paths list..."
python3 << 'PYEOF'
import json, os, sqlite3, sys
from urllib.parse import unquote, urlparse

db_path = os.path.expanduser("~/Library/Application Support/Cursor/User/globalStorage/state.vscdb")
conn = sqlite3.connect(db_path)
row = conn.execute("SELECT value FROM ItemTable WHERE key='history.recentlyOpenedPathsList'").fetchone()
if not row:
    print(" No recentlyOpenedPathsList found")
    conn.close()
    sys.exit(0)

data = json.loads(row[0])
entries = data.get('entries', [])
original_count = len(entries)
kept = []
removed = 0
for e in entries:
    # An entry refers to a folder, a single file, or a .code-workspace config.
    folder_uri = e.get('folderUri', '')
    file_uri = e.get('fileUri', '')
    ws_config = e.get('workspace', {}).get('configPath', '')
    uri = folder_uri or file_uri or ws_config
    if uri and uri.startswith('file://'):
        path = unquote(urlparse(uri).path)
        if not os.path.exists(path):
            removed += 1
            continue
    # Non-file URIs (e.g. remote workspaces) are kept untouched.
    kept.append(e)

data['entries'] = kept
conn.execute("UPDATE ItemTable SET value=? WHERE key='history.recentlyOpenedPathsList'", (json.dumps(data),))
conn.commit()
conn.close()
print(f" Removed {removed} entries with non-existent paths (kept {len(kept)} of {original_count})")
PYEOF
log "Recently opened list cleaned"
echo ""
# Step 3: Remove workspace storage directories whose backing project folder
# no longer exists, plus orphans that have no workspace.json at all.
echo "Step 3: Removing workspace storage for non-existent projects..."
removed_ws=0
removed_ws_size=0
for dir in "$WS_STORAGE"/*/; do
  # Skip the literal pattern when the glob matches nothing (empty/missing dir).
  [ -d "$dir" ] || continue
  ws_json="$dir/workspace.json"
  should_remove=false
  if [ -f "$ws_json" ]; then
    # Resolve the 'folder' URI recorded for this workspace to a local path.
    # '|| folder=' keeps a malformed workspace.json (python exits non-zero)
    # from killing the whole script under set -e; such dirs are kept.
    # NOTE(review): multi-root (.code-workspace) entries may record a key
    # other than 'folder'; those resolve to '' here and are kept - confirm.
    folder=$(python3 -c "
import json, sys
from urllib.parse import unquote, urlparse
d = json.load(open(sys.argv[1]))
f = d.get('folder', '')
if f.startswith('file://'):
    print(unquote(urlparse(f).path))
elif f:
    print(f)
else:
    print('')
" "$ws_json" 2>/dev/null) || folder=''
    if [ -n "$folder" ] && [ ! -d "$folder" ]; then
      should_remove=true
    fi
  else
    # No workspace.json = orphan
    should_remove=true
  fi
  if [ "$should_remove" = true ]; then
    # Tolerate a failed/empty 'du' so pipefail + arithmetic cannot abort.
    size=$(du -sk "$dir" 2>/dev/null | cut -f1) || size=0
    removed_ws_size=$((removed_ws_size + ${size:-0}))
    rm -rf "$dir"
    removed_ws=$((removed_ws + 1))
  fi
done
log "Removed $removed_ws workspace dirs ($(( removed_ws_size / 1024 )) MB)"
echo ""
# Step 4: Delete inlineDiff entries in cursorDiskKV that reference workspace
# IDs whose storage directory no longer exists (e.g. removed in Step 3).
# NOTE: the embedded Python's indentation was lost in the pasted source;
# it is restored here (commit placed inside the guard - a no-op otherwise).
echo "Step 4: Cleaning stale inline diff data from global state..."
python3 << 'PYEOF'
import os, sqlite3

db_path = os.path.expanduser("~/Library/Application Support/Cursor/User/globalStorage/state.vscdb")
ws_base = os.path.expanduser("~/Library/Application Support/Cursor/User/workspaceStorage")

# Workspace IDs that still have a storage directory on disk.
existing_ws_ids = set()
if os.path.isdir(ws_base):
    existing_ws_ids = set(os.listdir(ws_base))

conn = sqlite3.connect(db_path)
# Find inlineDiff and inlineDiffs entries referencing workspace IDs.
rows = conn.execute("SELECT key FROM cursorDiskKV WHERE key LIKE 'inlineDiff:%' OR key LIKE 'inlineDiffs-%'").fetchall()
to_delete = []
for (key,) in rows:
    # Extract the workspace ID from the key.
    # NOTE(review): assumes a 32-char hex ID at a fixed offset after the
    # prefix - confirm against the actual key format before relying on it.
    if key.startswith('inlineDiff:'):
        ws_id = key[11:43]
    elif key.startswith('inlineDiffs-'):
        ws_id = key[12:44]
    else:
        continue
    if ws_id not in existing_ws_ids:
        to_delete.append(key)

if to_delete:
    conn.executemany("DELETE FROM cursorDiskKV WHERE key=?", [(k,) for k in to_delete])
    conn.commit()
conn.close()
print(f" Removed {len(to_delete)} stale inline diff entries")
PYEOF
log "Stale inline diffs cleaned"
echo ""
# Step 5: Delete local file-history directories whose original file no
# longer exists on disk. Each History subdir has an entries.json recording
# the 'resource' URI of the file it backs up.
echo "Step 5: Cleaning file history for non-existent files..."
if [ -d "$HISTORY_DIR" ]; then
  removed_hist=0
  removed_hist_size=0
  for hist_dir in "$HISTORY_DIR"/*/; do
    [ -d "$hist_dir" ] || continue
    entries_json="$hist_dir/entries.json"
    if [ -f "$entries_json" ]; then
      # Resolve the 'resource' URI to a local path; prints '' on any parse
      # error so a corrupt entries.json just means "keep this dir".
      orig_path=$(python3 -c "
import json, sys
from urllib.parse import unquote, urlparse
try:
    d = json.load(open(sys.argv[1]))
    r = d.get('resource', '')
    if r.startswith('file://'):
        print(unquote(urlparse(r).path))
    else:
        print(r)
except Exception:
    print('')
" "$entries_json" 2>/dev/null) || orig_path=''
      if [ -n "$orig_path" ] && [ ! -e "$orig_path" ]; then
        # Tolerate a failed/empty 'du' so pipefail + arithmetic cannot abort.
        size=$(du -sk "$hist_dir" 2>/dev/null | cut -f1) || size=0
        removed_hist_size=$((removed_hist_size + ${size:-0}))
        rm -rf "$hist_dir"
        removed_hist=$((removed_hist + 1))
      fi
    fi
  done
  log "Removed $removed_hist file history dirs ($(( removed_hist_size / 1024 )) MB)"
else
  warn "No History directory found"
fi
echo ""
# Step 6: Drop Cursor's own stale state.vscdb.backup - it is a duplicate of
# the bloated database, and we already made our own copy in Step 1.
echo "Step 6: Removing stale state.vscdb.backup..."
if [ ! -f "$STATE_DB_BACKUP" ]; then
  warn "No backup file found"
else
  backup_size=$(du -sh "$STATE_DB_BACKUP" | cut -f1)
  rm -f "$STATE_DB_BACKUP"
  log "Removed state.vscdb.backup ($backup_size)"
fi
echo ""
# Step 7: VACUUM the global database to actually reclaim the space freed
# by the deletions above (SQLite does not shrink files on DELETE).
echo "Step 7: Compacting (VACUUM) state.vscdb..."
# Give a clear diagnostic if the sqlite3 CLI is unavailable, rather than
# dying with a bare 'command not found' under set -e.
if ! command -v sqlite3 >/dev/null 2>&1; then
  err "sqlite3 command not found - cannot compact databases"
  exit 1
fi
before_size=$(du -sh "$STATE_DB" | cut -f1)
sqlite3 "$STATE_DB" "VACUUM;"
after_size=$(du -sh "$STATE_DB" | cut -f1)
log "Compacted state.vscdb: $before_size -> $after_size"
echo ""
# Step 8: VACUUM each surviving per-workspace database as well.
echo "Step 8: Compacting remaining workspace databases..."
compacted=0
for db in "$WS_STORAGE"/*/state.vscdb; do
  [ -f "$db" ] || continue
  # Best effort: a locked or corrupt workspace DB should not stop the run.
  sqlite3 "$db" "VACUUM;" 2>/dev/null && compacted=$((compacted + 1))
done
log "Compacted $compacted workspace databases"
echo ""
# Final summary: re-measure the big directories so the user can compare
# against the "Current sizes" block printed at the start of the run.
# (du substitutions stay inline in the arguments so a failed du cannot
# abort the script under set -e/pipefail.)
printf '%s\n' "========================================="
printf '%s\n' " Cleanup Complete!"
printf '%s\n' "========================================="
printf '\n'
printf '%s\n' "Final sizes:"
printf '%s\n' " Global state.vscdb: $(du -sh "$STATE_DB" 2>/dev/null | cut -f1)"
printf '%s\n' " Workspace storage: $(du -sh "$WS_STORAGE" 2>/dev/null | cut -f1)"
printf '%s\n' " File history: $(du -sh "$HISTORY_DIR" 2>/dev/null | cut -f1)"
printf '%s\n' " Total Cursor dir: $(du -sh "$CURSOR_DIR" 2>/dev/null | cut -f1)"
printf '\n'
printf '%s\n' "Backup saved to: $BACKUP_DIR"
printf '%s\n' "You can delete the backup after verifying Cursor works correctly:"
printf '%s\n' " rm -rf $BACKUP_DIR"
printf '\n'
printf '%s\n' "You can now reopen Cursor."
@jhkchan
Copy link
Author

jhkchan commented Feb 18, 2026

Tested on my Mac

Diagnosis: Cursor Performance Issues                                                                                                                                                                             
                                                                                                                                                                                                                   
  Primary Problem: Massively bloated state.vscdb database                                                                                                                                                          
                                                                                                                                                                                                                   
  File: ~/Library/Application Support/Cursor/User/globalStorage/state.vscdb                                                                                                                                        
  Size: 3.2 GB (+ 3.2 GB backup = 6.4 GB for this one file alone)                                                                                                                                                  
                                                                                                                                                                                                                   
  The cursorDiskKV table has 86,425 rows containing 3.05 GB of data:                                                                                                                                               
                                                                                                                                                                                                                   
  ┌──────────────────────────────────────┬─────────┬──────────┐
  │               Category               │ Entries │   Size   │
  ├──────────────────────────────────────┼─────────┼──────────┤
  │ checkpointId (undo/redo checkpoints) │ 9,656   │ 1,577 MB │
  ├──────────────────────────────────────┼─────────┼──────────┤
  │ bubbleId (chat messages)             │ 45,871  │ 682 MB   │
  ├──────────────────────────────────────┼─────────┼──────────┤
  │ agentKv:blob (agent context data)    │ 21,255  │ 415 MB   │
  ├──────────────────────────────────────┼─────────┼──────────┤
  │ codeBlockDiff (inline diffs)         │ 5,377   │ 233 MB   │
  ├──────────────────────────────────────┼─────────┼──────────┤
  │ messageRequestContext                │ 1,626   │ 110 MB   │
  ├──────────────────────────────────────┼─────────┼──────────┤
  │ composerData                         │ 959     │ 27 MB    │
  ├──────────────────────────────────────┼─────────┼──────────┤
  │ composer.content + others            │ many    │ ~10 MB   │
  └──────────────────────────────────────┴─────────┴──────────┘

  Contributing Factors

  1. 12 Cursor windows open - each renderer is consuming 10-40% CPU
  2. GPU process (PID 2833) at 113.7% CPU - running since Friday, 148 hours of CPU time. It's trying to render content for all 12 windows while the database underneath is enormous
  3. 169 workspace entries, 22 of which are temporary/untitled
  4. 1,206 file history entries (157 MB)
  5. 2.2 GB of CachedData

  Root Cause

  Your chat/composer history has accumulated ~3 GB of data in a single SQLite database. Every time Cursor opens a window or loads a chat panel, it queries this bloated database. With 12 windows all doing this
  simultaneously, the GPU process gets overwhelmed rendering the UI, leading to high GPU usage and inability to load chats.
⏺ Cleanup completed. Here's the summary:                                                                                                                                                                                                                
                                                                                                                                                                                                                                                        
  ┌────────────────────────┬─────────────────────┬──────────────────┬───────────────────────────┐                                                                                                                                                       
  │          What          │       Before        │      After       │           Freed           │                                                                                                                                                       
  ├────────────────────────┼─────────────────────┼──────────────────┼───────────────────────────┤                                                                                                                                                       
  │ state.vscdb.backup     │ 3.2 GB              │ removed          │ 3.2 GB                    │                                                                                                                                                       
  ├────────────────────────┼─────────────────────┼──────────────────┼───────────────────────────┤                                                                                                                                                       
  │ Recently opened list   │ 364 entries         │ 235 entries      │ 129 stale entries removed │
  ├────────────────────────┼─────────────────────┼──────────────────┼───────────────────────────┤
  │ Workspace storage dirs │ 169 (207 MB)        │ 108 (155 MB)     │ 61 dirs (36 MB)           │
  ├────────────────────────┼─────────────────────┼──────────────────┼───────────────────────────┤
  │ File history           │ 1,206 dirs (157 MB) │ 693 dirs (99 MB) │ 513 dirs (57 MB)          │
  ├────────────────────────┼─────────────────────┼──────────────────┼───────────────────────────┤
  │ Inline diff entries    │ 110 stale           │ 0 stale          │ 110 entries               │
  ├────────────────────────┼─────────────────────┼──────────────────┼───────────────────────────┤
  │ Total Cursor dir       │ 10 GB               │ 7.2 GB           │ ~2.8 GB                   │
  └────────────────────────┴─────────────────────┴──────────────────┴───────────────────────────┘

  The state.vscdb itself stayed at 3.2 GB because the VACUUM couldn't reclaim much — the data inside (checkpoints, chat bubbles, agent blobs) is still actively referenced by your 959 composer sessions. That's the accumulated chat/composer history
  across all your current projects.

  If the high GPU usage and chat loading issues persist after reopening Cursor, the next step would be to purge old composer/chat history from the database (the 1.5 GB of checkpoints and 682 MB of chat bubbles). Want me to help with that if the
  problem continues?

  Your backup is at ~/cursor-backup-20260217-235007/ — delete it once you've verified Cursor works fine.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment