Skip to content

Instantly share code, notes, and snippets.

@nsdevaraj
Last active March 4, 2026 08:16
Show Gist options
  • Select an option

  • Save nsdevaraj/956c719c45ec353017b25c806e86969a to your computer and use it in GitHub Desktop.

Select an option

Save nsdevaraj/956c719c45ec353017b25c806e86969a to your computer and use it in GitHub Desktop.
Analyse a Chrome (V8) memory heap snapshot.
"""
Fast V8 heap snapshot analyser
Focuses on: type distribution, detached nodes, large objects, closure/object names
Skips expensive full-graph retained-size computation.
"""
import json, collections, sys

SNAPSHOT_PATH = '/sessions/practical-relaxed-franklin/mnt/uploads/Heap-20260226T131952.heapsnapshot'

print("Loading snapshot…", flush=True)
with open(SNAPSHOT_PATH, 'r', encoding='utf-8') as f:
    data = json.load(f)
print("Done.", flush=True)

# The snapshot is flat integer arrays whose record layout is described
# by snapshot.meta (field order and type-name tables).
snap_info = data['snapshot']
meta = snap_info['meta']
node_fields = meta['node_fields']
edge_fields = meta['edge_fields']
node_type_names = meta['node_types'][0]
edge_type_names = meta['edge_types'][0]
raw_nodes = data['nodes']
raw_edges = data['edges']
strings = data['strings']

# Record widths (ints per node / per edge) and field offsets within a record.
NF = len(node_fields)
EF = len(edge_fields)
ni_type = node_fields.index('type')
ni_name = node_fields.index('name')
ni_id = node_fields.index('id')
ni_self_size = node_fields.index('self_size')
ni_edge_count = node_fields.index('edge_count')
# 'detachedness' only exists in newer snapshot formats.
ni_detach = node_fields.index('detachedness') if 'detachedness' in node_fields else None
ei_type = edge_fields.index('type')
ei_name = edge_fields.index('name_or_index')
ei_to_node = edge_fields.index('to_node')

node_count = len(raw_nodes) // NF
edge_count = len(raw_edges) // EF
print(f"\n{'='*60}")
print(f" Node count : {node_count:,}")
print(f" Edge count : {edge_count:,}")
print(f" Strings : {len(strings):,}")
# ── helper ─────────────────────────────────────────────────────────────────────
def ntype(type_idx):
    """Translate a numeric node-type index into its human-readable name."""
    if type_idx < len(node_type_names):
        return node_type_names[type_idx]
    return str(type_idx)
def nname(name_idx):
    """Look up a string-table index; '?' for out-of-range indices."""
    if name_idx < len(strings):
        return strings[name_idx]
    return '?'
# ── 1. Type distribution ───────────────────────────────────────────────────────
print("\n== 1. NODE TYPE DISTRIBUTION ==")
type_counts = collections.Counter()
type_sizes = collections.Counter()
for node_i in range(node_count):
    rec = node_i * NF
    label = ntype(raw_nodes[rec + ni_type])
    type_counts[label] += 1
    type_sizes[label] += raw_nodes[rec + ni_self_size]
total_self = sum(type_sizes.values())
print(f" Total self-size : {total_self/1024/1024:.1f} MB")
for tn, cnt in type_counts.most_common():
    sz = type_sizes[tn]
    print(f" {tn:28s} count={cnt:>8,} size={sz/1024/1024:8.2f} MB")
# ── 2. Top 50 by self_size ─────────────────────────────────────────────────────
print("\n== 2. TOP 50 NODES BY SELF SIZE ==")
# Stable sort: equal sizes keep ascending node-index order.
ranked = sorted(range(node_count), key=lambda i: raw_nodes[i * NF + ni_self_size], reverse=True)
top_self = ranked[:50]
for rank, idx in enumerate(top_self, 1):
    rec = idx * NF
    t = ntype(raw_nodes[rec + ni_type])
    name = nname(raw_nodes[rec + ni_name])
    ss = raw_nodes[rec + ni_self_size]
    ec = raw_nodes[rec + ni_edge_count]
    print(f" {rank:3}. [{t:20s}] {name[:70]:70s} self={ss:>10,} edges={ec:>6,}")
# ── 3. Detached DOM nodes ──────────────────────────────────────────────────────
print("\n== 3. DETACHED DOM NODES ==")
if ni_detach is None:
    print(" detachedness field not present")
else:
    det_names = collections.Counter()
    det_sizes = collections.Counter()
    det_count = 0
    det_size = 0
    for i in range(node_count):
        rec = i * NF
        # detachedness == 1 marks nodes no longer reachable from the DOM tree
        if raw_nodes[rec + ni_detach] != 1:
            continue
        det_count += 1
        name = nname(raw_nodes[rec + ni_name])
        ss = raw_nodes[rec + ni_self_size]
        det_names[name] += 1
        det_sizes[name] += ss
        det_size += ss
    print(f" Total detached nodes : {det_count:,}")
    print(f" Total detached size : {det_size/1024:.1f} KB")
    print(" Top detached by count:")
    for name, cnt in det_names.most_common(25):
        print(f" {cnt:>6,}x {det_sizes[name]/1024:8.1f} KB {name}")
# ── 4. Closure names (potential listener leaks) ────────────────────────────────
print("\n== 4. CLOSURE / FUNCTION NAMES (accumulation patterns) ==")
closure_idx = node_type_names.index('closure') if 'closure' in node_type_names else -1
code_idx = node_type_names.index('code') if 'code' in node_type_names else -1
cl_names = collections.Counter()
cl_sizes = collections.Counter()
wanted_types = {closure_idx, code_idx}  # -1 never matches a real type index
for i in range(node_count):
    rec = i * NF
    if raw_nodes[rec + ni_type] in wanted_types:
        name = nname(raw_nodes[rec + ni_name])
        cl_names[name] += 1
        cl_sizes[name] += raw_nodes[rec + ni_self_size]
print(f" Total closures+code: {sum(cl_names.values()):,}")
print(" Most frequent (high count = possible event-listener accumulation):")
for name, cnt in cl_names.most_common(40):
    print(f" {cnt:>7,}x {cl_sizes[name]/1024:8.1f} KB {name}")
# ── 5. Object names (user-defined classes) ────────────────────────────────────
print("\n== 5. TOP 60 OBJECT CLASS NAMES ==")
obj_idx = node_type_names.index('object') if 'object' in node_type_names else -1
obj_names = collections.Counter()
obj_sizes = collections.Counter()
for i in range(node_count):
    rec = i * NF
    if raw_nodes[rec + ni_type] != obj_idx:
        continue
    cls_name = nname(raw_nodes[rec + ni_name])
    obj_names[cls_name] += 1
    obj_sizes[cls_name] += raw_nodes[rec + ni_self_size]
total_obj_size = sum(obj_sizes.values())
print(f" Total object nodes: {sum(obj_names.values()):,} ({total_obj_size/1024/1024:.1f} MB)")
for cls_name, cnt in obj_names.most_common(60):
    print(f" {cnt:>7,}x {obj_sizes[cls_name]/1024/1024:7.2f} MB {cls_name}")
# ── 6. Top objects by total size ──────────────────────────────────────────────
print("\n== 6. TOP 30 OBJECT CLASSES BY TOTAL SIZE ==")
by_total = sorted(obj_sizes.items(), key=lambda kv: kv[1], reverse=True)
for cls_name, total in by_total[:30]:
    print(f" {total/1024/1024:9.2f} MB {obj_names[cls_name]:>7,}x {cls_name}")
# ── 7. Top 30 nodes by edge count ────────────────────────────────────────────
print("\n== 7. TOP 30 NODES BY EDGE COUNT (large maps / emitters) ==")
by_edges = sorted(range(node_count), key=lambda i: raw_nodes[i * NF + ni_edge_count], reverse=True)
for rank, idx in enumerate(by_edges[:30], 1):
    rec = idx * NF
    t = ntype(raw_nodes[rec + ni_type])
    name = nname(raw_nodes[rec + ni_name])
    ec = raw_nodes[rec + ni_edge_count]
    ss = raw_nodes[rec + ni_self_size]
    print(f" {rank:3}. [{t:20s}] {name[:70]:70s} edges={ec:>8,} self={ss:>8,}")
# ── 8. Edge type distribution ─────────────────────────────────────────────────
print("\n== 8. EDGE TYPE DISTRIBUTION ==")
edge_type_dist = collections.Counter()
for j in range(edge_count):
    et = raw_edges[j * EF + ei_type]
    label = edge_type_names[et] if et < len(edge_type_names) else str(et)
    edge_type_dist[label] += 1
for label, cnt in edge_type_dist.most_common():
    print(f" {label:20s} {cnt:>10,}")
# ── 9. Large arrays ────────────────────────────────────────────────────────────
print("\n== 9. TOP 20 LARGE ARRAYS ==")
arr_idx = node_type_names.index('array') if 'array' in node_type_names else -1
arr_node_ids = [i for i in range(node_count) if raw_nodes[i * NF + ni_type] == arr_idx]
arr_node_ids.sort(key=lambda i: raw_nodes[i * NF + ni_self_size], reverse=True)
for idx in arr_node_ids[:20]:
    rec = idx * NF
    name = nname(raw_nodes[rec + ni_name])
    ss = raw_nodes[rec + ni_self_size]
    ec = raw_nodes[rec + ni_edge_count]
    print(f" self={ss:>10,} edges={ec:>6,} name={name[:60]}")
# ── 10. Large strings ─────────────────────────────────────────────────────────
print("\n== 10. TOP 20 LARGE STRINGS ==")
# V8 splits string storage across several node types.
str_types = {
    node_type_names.index(tn)
    for tn in ('string', 'concatenated string', 'sliced string')
    if tn in node_type_names
}
str_node_ids = [i for i in range(node_count) if raw_nodes[i * NF + ni_type] in str_types]
str_node_ids.sort(key=lambda i: raw_nodes[i * NF + ni_self_size], reverse=True)
for idx in str_node_ids[:20]:
    rec = idx * NF
    value = nname(raw_nodes[rec + ni_name])
    ss = raw_nodes[rec + ni_self_size]
    print(f" self={ss:>10,} value={repr(value[:100])}")
# ── 11. Native objects ────────────────────────────────────────────────────────
print("\n== 11. NATIVE OBJECTS ==")
nat_idx = node_type_names.index('native') if 'native' in node_type_names else -1
nat_names = collections.Counter()
nat_sizes = collections.Counter()
for i in range(node_count):
    rec = i * NF
    if raw_nodes[rec + ni_type] != nat_idx:
        continue
    name = nname(raw_nodes[rec + ni_name])
    nat_names[name] += 1
    nat_sizes[name] += raw_nodes[rec + ni_self_size]
for name, cnt in nat_names.most_common(25):
    print(f" {cnt:>6,}x {nat_sizes[name]/1024:8.1f} KB {name}")
# ── 12. Object shape analysis ─────────────────────────────────────────────────
print("\n== 12. OBJECT SHAPE (MAP) ANALYSIS ==")
shape_idx = node_type_names.index('object shape') if 'object shape' in node_type_names else -1
shape_names = collections.Counter()
shape_sizes = collections.Counter()
if shape_idx < 0:
    print(" No 'object shape' type found")
else:
    for i in range(node_count):
        rec = i * NF
        if raw_nodes[rec + ni_type] != shape_idx:
            continue
        name = nname(raw_nodes[rec + ni_name])
        shape_names[name] += 1
        shape_sizes[name] += raw_nodes[rec + ni_self_size]
    print(f" Total object shapes: {sum(shape_names.values()):,}")
    for name, cnt in shape_names.most_common(20):
        print(f" {cnt:>6,}x {shape_sizes[name]/1024:8.1f} KB {name}")
print("\n=== ANALYSIS COMPLETE ===", flush=True)
"""
Fast V8 heapsnapshot analyser using numpy for integer array parsing.
Reads each section separately to avoid OOM.
"""
import json, collections, re, numpy as np, sys

SNAPSHOT_PATH = '/sessions/laughing-wonderful-faraday/mnt/uploads/Heap-20260228T102701.heapsnapshot'

# Known byte offsets (pre-scanned for this specific snapshot file).
NODES_ARRAY_START = 874         # byte of '[' opening nodes array
EDGES_OFFSET = 131_522_241      # byte of '"edges"'
STRINGS_OFFSET = 582_275_423    # byte of '"strings"'
FILE_SIZE = 599_666_086
def read_section_bytes(path, start, end):
    """Return the raw bytes of *path* from offset *start* up to (excluding) *end*."""
    with open(path, 'rb') as fh:
        fh.seek(start)
        return fh.read(end - start)
def find_array_content(raw: bytes):
    """Strip everything outside the outermost [ ... ] and return the interior bytes."""
    open_at = raw.index(b'[')       # first '[' from the front
    close_at = raw.rindex(b']')     # last ']' scanning from the end
    return raw[open_at + 1:close_at]
def parse_int_array_np(raw: bytes) -> np.ndarray:
    """Parse a flat JSON int array (e.g. b'[1, 2,\\n3]') into a numpy int32 array.

    Bug fix: the previous version returned a dummy uint8 view of the raw bytes
    and left the real parse unreachable after the first ``return``.  It also
    relied on ``np.fromstring(sep=',')``, which is deprecated and removed in
    NumPy 2.0; splitting on commas and casting is the supported equivalent.
    """
    # Keep only the interior of the outermost [ ... ].
    start = raw.index(b'[')
    end = raw.rindex(b']')
    inner = raw[start + 1:end]
    # Drop whitespace so every comma-separated token is a bare integer.
    cleaned = inner.replace(b'\n', b'').replace(b'\r', b'').replace(b' ', b'')
    if not cleaned:
        # Empty JSON array -> empty int32 vector.
        return np.empty(0, dtype=np.int32)
    return np.array(cleaned.split(b','), dtype=np.int32)
print("Step 1: Reading snapshot meta…", flush=True)
with open(SNAPSHOT_PATH, 'r', encoding='utf-8') as f:
    header = f.read(2000)
# The "snapshot" block sits at the top of the file; grab it with a regex so we
# never have to json.load the full multi-hundred-MB document.
snap_match = re.search(
    r'"snapshot"\s*:\s*(\{.*?"extra_native_bytes"\s*:\s*\d+\})', header, re.DOTALL
)
snap_block = json.loads(snap_match.group(1))
meta = snap_block['meta']
node_fields = meta['node_fields']
edge_fields = meta['edge_fields']
node_type_names = meta['node_types'][0]
edge_type_names = meta['edge_types'][0]
NF = len(node_fields)
EF = len(edge_fields)
ni_type = node_fields.index('type')
ni_name = node_fields.index('name')
ni_self_size = node_fields.index('self_size')
ni_edge_count = node_fields.index('edge_count')
ni_detach = node_fields.index('detachedness') if 'detachedness' in node_fields else None
ei_type = edge_fields.index('type')
node_count = snap_block['node_count']
edge_count = snap_block['edge_count']
print(f" Nodes={node_count:,} Edges={edge_count:,}", flush=True)

# ── Step 2: Load strings ──────────────────────────────────────────────────────
print("Step 2: Loading strings (~17 MB)…", flush=True)
raw_tail = read_section_bytes(SNAPSHOT_PATH, STRINGS_OFFSET, FILE_SIZE)
strings_match = re.match(rb'"strings"\s*:\s*(\[.*?\])\s*\}?\s*$', raw_tail, re.DOTALL)
strings = json.loads(strings_match.group(1))
print(f" Strings: {len(strings):,}", flush=True)
del raw_tail

def ntype(t):
    """Node-type index -> type name."""
    return node_type_names[t] if t < len(node_type_names) else str(t)

def nname(n):
    """String-table index -> string ('?' if out of range)."""
    return strings[n] if n < len(strings) else '?'
# ── Step 3: Load & parse nodes array ─────────────────────────────────────────
print("Step 3: Reading nodes section (~125 MB)…", flush=True)
raw_nodes_bytes = read_section_bytes(SNAPSHOT_PATH, NODES_ARRAY_START, EDGES_OFFSET - 2)
print(" Parsing integers with numpy…", flush=True)
inner = find_array_content(raw_nodes_bytes)
del raw_nodes_bytes
if hasattr(np, 'fromstring'):
    # NumPy < 2.0: fromstring(sep=',') is deprecated but far more
    # memory-efficient than splitting a ~125 MB buffer into tokens.
    nodes = np.fromstring(inner, dtype=np.int32, sep=',')
else:
    # NumPy >= 2.0 removed fromstring; split-and-cast is the supported route.
    nodes = np.array(inner.replace(b'\n', b'').split(b','), dtype=np.int32)
del inner
print(f" Nodes array shape: {nodes.shape} ({nodes.nbytes/1024/1024:.1f} MB)", flush=True)
# Reshape to (node_count, NF)
nodes = nodes[:node_count * NF].reshape(-1, NF)
print(f" Reshaped: {nodes.shape}", flush=True)
# ── Step 4: Node analysis ─────────────────────────────────────────────────────
print("Step 4: Analysing nodes…", flush=True)
import heapq  # NOTE(review): unused in the visible portion; later code may need it

# Column views over the (node_count, NF) node matrix.
types = nodes[:, ni_type]
names_idx = nodes[:, ni_name]
self_sz = nodes[:, ni_self_size]
edge_cnt = nodes[:, ni_edge_count]
detach = nodes[:, ni_detach] if ni_detach is not None else None
total_self = int(self_sz.sum())

# -- Type distribution
type_counts = collections.Counter()
type_sizes = collections.Counter()
for t_val in np.unique(types):
    sel = types == t_val
    label = ntype(int(t_val))
    type_counts[label] = int(sel.sum())
    type_sizes[label] = int(self_sz[sel].sum())

# -- Top 50 by self size (argpartition avoids sorting all nodes)
top50_idx = np.argpartition(self_sz, -50)[-50:]
top50_idx = top50_idx[np.argsort(self_sz[top50_idx])[::-1]]
# -- Top 30 by edge count
top30_idx = np.argpartition(edge_cnt, -30)[-30:]
top30_idx = top30_idx[np.argsort(edge_cnt[top30_idx])[::-1]]
# -- Detached nodes
# Bug fix: the old code added the *global* self-size sum of every node sharing
# the name (detached or not) once per detached occurrence, wildly overcounting.
# Accumulate each detached node's own self_size instead.
det_names_c = collections.Counter()
det_sizes_c = collections.Counter()
det_count = 0
det_size_tot = 0
if detach is not None:
    det_mask = detach == 1
    det_count = int(det_mask.sum())
    det_size_tot = int(self_sz[det_mask].sum())
    for nm, ss in zip(names_idx[det_mask], self_sz[det_mask]):
        det_names_c[int(nm)] += 1
        det_sizes_c[int(nm)] += int(ss)
def _tally_by_name(mask):
    """Count and size-sum the nodes selected by *mask*, keyed by name index."""
    names_c = collections.Counter()
    sizes_c = collections.Counter()
    for nm, ss in zip(names_idx[mask], self_sz[mask]):
        nm = int(nm)
        names_c[nm] += 1
        sizes_c[nm] += int(ss)
    return names_c, sizes_c

# -- Closures / code
closure_idx = node_type_names.index('closure') if 'closure' in node_type_names else -1
code_idx = node_type_names.index('code') if 'code' in node_type_names else -1
cl_mask = (types == closure_idx) | (types == code_idx)
cl_names_c, cl_sizes_c = _tally_by_name(cl_mask)

# -- Objects
obj_idx = node_type_names.index('object') if 'object' in node_type_names else -1
obj_mask = types == obj_idx
obj_names_c, obj_sizes_c = _tally_by_name(obj_mask)

# -- Native objects
nat_idx = node_type_names.index('native') if 'native' in node_type_names else -1
nat_mask = types == nat_idx
nat_names_c, nat_sizes_c = _tally_by_name(nat_mask)

# -- Object shapes
shape_idx = node_type_names.index('object shape') if 'object shape' in node_type_names else -1
shape_names_c = collections.Counter()
shape_sizes_c = collections.Counter()
if shape_idx >= 0:
    shape_names_c, shape_sizes_c = _tally_by_name(types == shape_idx)

# -- Arrays: 20 largest (size, edge_count, name_idx) triples
arr_idx = node_type_names.index('array') if 'array' in node_type_names else -1
arr_mask = types == arr_idx
large_arrs = sorted(
    zip(self_sz[arr_mask].tolist(), edge_cnt[arr_mask].tolist(), names_idx[arr_mask].tolist()),
    reverse=True,
)[:20]

# -- Strings: union of all V8 string node types
str_types = set()
for tn in ('string', 'concatenated string', 'sliced string'):
    if tn in node_type_names:
        str_types.add(node_type_names.index(tn))
str_mask = np.zeros(len(types), dtype=bool)
for st in str_types:
    str_mask |= (types == st)
large_strs = sorted(
    zip(self_sz[str_mask].tolist(), names_idx[str_mask].tolist()), reverse=True
)[:20]

del nodes  # free memory before edges
print(" Node analysis done.", flush=True)
# ── Step 5: Edge type distribution ───────────────────────────────────────────
print("Step 5: Reading edges section (~435 MB)…", flush=True)
raw_edges_bytes = read_section_bytes(SNAPSHOT_PATH, EDGES_OFFSET, STRINGS_OFFSET - 2)
print(" Parsing edge integers…", flush=True)
inner_e = find_array_content(raw_edges_bytes)
del raw_edges_bytes
if hasattr(np, 'fromstring'):
    # NumPy < 2.0: deprecated but memory-efficient for a ~435 MB buffer.
    edges = np.fromstring(inner_e, dtype=np.int32, sep=',')
else:
    # NumPy >= 2.0 removed fromstring; split-and-cast is the supported route.
    edges = np.array(inner_e.replace(b'\n', b'').split(b','), dtype=np.int32)
del inner_e
print(f" Edges array: {edges.shape} ({edges.nbytes/1024/1024:.1f} MB)", flush=True)
edges = edges[:edge_count * EF].reshape(-1, EF)
# Bug fix: copy the column — a view keeps the whole edges matrix alive,
# so the following `del edges` would not actually release the memory.
edge_types = edges[:, ei_type].copy()
del edges
edge_type_dist = collections.Counter()
for et_val in np.unique(edge_types):
    ename = edge_type_names[int(et_val)] if int(et_val) < len(edge_type_names) else str(et_val)
    edge_type_dist[ename] = int((edge_types == et_val).sum())
del edge_types
print(" Edge analysis done.", flush=True)
# ── Print Results ──────────────────────────────────────────────────────────────
SEP = '=' * 70
print(f"\n{SEP}")
print(" HEAP SNAPSHOT SUMMARY")
print(f" Nodes : {node_count:,}")
print(f" Edges : {edge_count:,}")
print(f" Strings : {len(strings):,}")
print(f" Total self : {total_self/1024/1024:.1f} MB")
print(SEP)

print("\n== 1. NODE TYPE DISTRIBUTION ==")
print(f" Total self-size : {total_self/1024/1024:.1f} MB")
for tn, cnt in type_counts.most_common():
    print(f" {tn:28s} count={cnt:>8,} size={type_sizes[tn]/1024/1024:8.2f} MB")
print("\n== 2. TOP 50 NODES BY SELF SIZE ==")
for rank, idx in enumerate(top50_idx, 1):
t = int(types[idx])
nm
// --- Setup: load and parse the V8 heap snapshot JSON -------------------------
const fs = require('fs');
// NOTE(review): hard-coded local path — the snapshot must exist at this location.
const heapFile = '/Users/devarajns/Downloads/Heap-20260304T134342.heapsnapshot';
console.log('Loading heap snapshot...');
// Reads the whole file into memory; fine for snapshots that fit in RAM.
const fileContent = fs.readFileSync(heapFile, 'utf8');
const data = JSON.parse(fileContent);
// Nodes, edges, and strings are at the top level
const { snapshot, nodes, edges, strings } = data;
const { meta, node_count, edge_count } = snapshot;
console.log(`\n=== HEAP SNAPSHOT ANALYSIS ===`);
console.log(`Nodes: ${node_count.toLocaleString()}, Edges: ${edge_count.toLocaleString()}`);
// Node fields: type, name, id, self_size, edge_count, detachedness
// nodeTypes maps a numeric type index to its name string.
const nodeTypes = meta.node_types[0];
// Fall back to an empty table if the snapshot carries no strings section.
const stringsArray = strings || [];
function getNodeType(idx) {
  // Each node record is assumed 6 ints wide (type, name, id, self_size,
  // edge_count, detachedness) — verify against meta.node_fields.length.
  const base = idx * 6;
  return nodeTypes[nodes[base]] || 'unknown';
}
function getNodeName(idx) {
  // Field 1 of a node record is an index into the snapshot string table.
  const nameIdx = nodes[idx * 6 + 1];
  return stringsArray[nameIdx] || `string_${nameIdx}`;
}
function getNodeSelfSize(idx) {
  // Field 3 is the node's shallow (self) size in bytes.
  return nodes[idx * 6 + 3];
}
function getNodeEdgeCount(idx) {
  // Field 4 is the number of outgoing edges for this node.
  return nodes[idx * 6 + 4];
}
function getNodeDetachedness(idx) {
  // Field 5 is the detachedness flag (non-zero = detached from the DOM).
  return nodes[idx * 6 + 5];
}
// Summary by type
console.log('\n=== SUMMARY BY TYPE ===');
const typeSummary = {};
for (let i = 0; i < node_count; i++) {
  const type = getNodeType(i);
  // Lazily create the per-type accumulator on first sight.
  const stats = typeSummary[type] || (typeSummary[type] = { count: 0, size: 0 });
  stats.count += 1;
  stats.size += getNodeSelfSize(i);
}
Object.entries(typeSummary)
  .sort(([, a], [, b]) => b.size - a.size)
  .forEach(([type, stats]) => {
    console.log(`${type}: ${stats.count.toLocaleString()} objects, ${(stats.size / 1024 / 1024).toFixed(2)} MB`);
  });
// Analyze strings - look for floating point duplicates
console.log('\n=== TOP 30 DUPLICATE STRINGS ===');
// Bug fix: tallying into a plain {} breaks when a heap string is named
// "toString", "constructor", "__proto__", etc. — the inherited
// Object.prototype member is truthy, so `|| 0` never fires and the count
// is corrupted. Map has no inherited keys.
const stringCount = new Map();
const stringSize = new Map();
for (let i = 0; i < node_count; i++) {
  if (getNodeType(i) === 'string') {
    const name = getNodeName(i);
    stringCount.set(name, (stringCount.get(name) || 0) + 1);
    stringSize.set(name, (stringSize.get(name) || 0) + getNodeSelfSize(i));
  }
}
const stringsByCount = [...stringCount.entries()]
  .filter(([, count]) => count > 50)
  .sort((a, b) => b[1] - a[1]);
console.log('Top duplicate strings (count > 50):');
stringsByCount.slice(0, 30).forEach(([str, count], i) => {
  const size = stringSize.get(str);
  console.log(`${i+1}. "${str.substring(0, 60)}" - count: ${count}, total size: ${(size / 1024).toFixed(2)} KB`);
});
// Analyze closures
console.log('\n=== CLOSURE ANALYSIS ===');
let closureCount = 0;
let closureSize = 0;
for (let i = 0; i < node_count; i++) {
  if (getNodeType(i) !== 'closure') continue;
  closureCount += 1;
  closureSize += getNodeSelfSize(i);
}
console.log(`Total closures: ${closureCount.toLocaleString()}, Total size: ${(closureSize / 1024).toFixed(2)} KB`);
// Analyze detached DOM trees
console.log('\n=== DETACHED DOM TREES ===');
const detached = [];
for (let i = 0; i < node_count; i++) {
  const detachedness = getNodeDetachedness(i);
  if (detachedness <= 0) continue;
  detached.push({
    type: getNodeType(i),
    name: getNodeName(i).substring(0, 80),
    detachedness,
    size: getNodeSelfSize(i),
  });
}
detached.sort((a, b) => b.detachedness - a.detachedness);
console.log(`Total detached nodes: ${detached.length}`);
// Count popover containers
const popoverCount = detached.filter((d) => d.name.includes('popover')).length;
console.log(`Detached popover containers: ${popoverCount}`);
// Top detached
console.log('\nTop 10 detached:');
detached.slice(0, 10).forEach((node, i) => {
  console.log(`${i+1}. ${node.name} - detachedness: ${node.detachedness}, size: ${(node.size / 1024).toFixed(2)} KB`);
});
// Large objects
console.log('\n=== TOP 20 LARGEST OBJECTS ===');
const largeObjects = [];
for (let i = 0; i < node_count; i++) {
  const selfSize = getNodeSelfSize(i);
  // Only track nodes above a 20 KB self-size threshold.
  if (selfSize <= 20000) continue;
  largeObjects.push({
    type: getNodeType(i),
    name: getNodeName(i).substring(0, 80),
    selfSize,
  });
}
largeObjects.sort((a, b) => b.selfSize - a.selfSize);
largeObjects.slice(0, 20).forEach((obj, i) => {
  console.log(`${i+1}. ${obj.type}: ${obj.name} - ${(obj.selfSize / 1024).toFixed(2)} KB`);
});
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment