-
-
Save jcstein/1c931501c7a136580363ce6928467b8d to your computer and use it in GitHub Desktop.
#!/usr/bin/env bash
# Bash-only Celestia blob submit load tester.
#
# Runs CMD a total of COUNT times, passing NS and DATA as arguments,
# timing each submission and printing one line per run plus a summary
# (avg/min/max in ms, computed over SUCCESSFUL submissions only).
#
# Environment overrides (all optional):
#   COUNT - number of submissions (default 20)
#   CMD   - command to run (default "celestia blob submit")
#   NS    - namespace argument (default 0x676d)
#   DATA  - blob data argument (default 0x676d)
#
# Usage: COUNT=100 NS=0x676d DATA=0x676d CMD="celestia blob submit" ./celestia_blob_bench.sh
set -euo pipefail

COUNT=${COUNT:-20}
CMD=${CMD:-"celestia blob submit"}
NS=${NS:-"0x676d"}
DATA=${DATA:-"0x676d"}

printf "Running %s submissions: %s %s %s\n" "$COUNT" "$CMD" "$NS" "$DATA"

ok=0
fail=0
times=()

# jq is optional; without it the height/commitment fields are skipped.
has_jq=1
if ! command -v jq >/dev/null 2>&1; then
  has_jq=0
  echo "Note: jq not found; height/commitment parsing skipped." >&2
fi

for ((i = 1; i <= COUNT; i++)); do
  rc=0                        # reset per iteration; no unset/${rc:-0} dance
  start_ns=$(date +%s%N)      # nanosecond timestamp (GNU date)
  # CMD is intentionally unquoted so "celestia blob submit" word-splits
  # into the binary plus its subcommands; || captures the exit status
  # without tripping set -e.
  out=$($CMD "$NS" "$DATA" 2>&1) || rc=$?
  end_ns=$(date +%s%N)
  dur_ms=$(( (end_ns - start_ns) / 1000000 ))

  if [ "$rc" -eq 0 ]; then
    ok=$((ok + 1))
    times+=("$dur_ms")
    height="-"
    commitment="-"
    if [ "$has_jq" -eq 1 ]; then
      # Accept either wrapped ({.result...}) or bare JSON responses.
      height=$(echo "$out" | jq -r '(.result.height // .height // "-")')
      commitment=$(echo "$out" | jq -r '(.result.commitments[0] // .commitments[0] // "-")')
    fi
    printf "[%03d] ok %7.2f ms | height=%s | commitment=%s\n" "$i" "$dur_ms" "$height" "$commitment"
  else
    fail=$((fail + 1))
    printf "[%03d] fail %7.2f ms | rc=%s | %s\n" "$i" "$dur_ms" "$rc" "$out"
  fi
done

if [ "${#times[@]}" -gt 0 ]; then
  sum=0
  min=${times[0]}
  max=${times[0]}
  for t in "${times[@]}"; do
    sum=$((sum + t))
    (( t < min )) && min=$t
    (( t > max )) && max=$t
  done
  # Reuse the sum accumulated above instead of re-summing every sample
  # through a second awk pass (the original computed sum and then never
  # used it). This branch guarantees count > 0, so no divide-by-zero.
  avg=$(awk -v s="$sum" -v c="${#times[@]}" 'BEGIN { printf "%.2f", s / c }')
  printf "\nSummary: total=%s ok=%s fail=%s\n" "$COUNT" "$ok" "$fail"
  printf "Durations (ms): avg=%s min=%s max=%s\n" "$avg" "$min" "$max"
else
  printf "\nSummary: total=%s ok=%s fail=%s (no successes to compute stats)\n" "$COUNT" "$ok" "$fail"
fi
📊 Updated Stats (Removing the 58,556 ms Outlier)
Averages
- Run 1 avg (rpc-mocha.pops.one): 12,597.20 ms
- Run 2 avg (full.consensus.mocha-4.celestia-mocha.com) (outlier removed): 12,800.58 ms
Difference
- Absolute: +203.38 ms
- Percentage: ~1.61% slower
➡️ Once the outlier is removed, the two runs perform nearly identically, with Run 2 only ~1.6% slower.
📈 Graph (Run 1 vs Run 2 Without Outlier)
The graph below was generated from your data and shows latencies per submission:
from celestia-app using `go run ./tools/latency-monitor -a [YOUR_ACCOUNT_MONIKER] -b 1048000 -z 1048000 -d 1500ms -e "grpc-mocha.pops.one:9090"`
where my account is: https://mocha.celenium.io/address/celestia1mh0hxmde0mcpx4zhzlt0wua4490fhzjzsc2w6r?tab=transactions
Transaction Statistics:
Total transactions: 294
Successful: 294 (100.0%)
Failed: 0 (0.0%)
Latency Statistics (successful transactions only):
Average latency: 12406.89 ms
Standard deviation: 3826.13 ms
using go run ./tools/latency-monitor -a [YOUR_ACCOUNT_MONIKER] -b 1048000 -z 1048000 -d 1500ms -e "full.consensus.mocha-4.celestia-mocha.com:9090"
with the same account:
Transaction Statistics:
Total transactions: 293
Successful: 293 (100.0%)
Failed: 0 (0.0%)
Latency Statistics (successful transactions only):
Average latency: 12133.84 ms
Standard deviation: 4082.07 ms
Latency Summary (grpc-mocha.pops.one vs full.consensus.mocha-4)
-
grpc-mocha.pops.one
- Avg latency: 12,406.89 ms
- Std dev: 3,826.13 ms
-
full.consensus.mocha-4
- Avg latency: 12,133.84 ms
- Std dev: 4,082.07 ms
Comparison
- full.consensus.mocha-4 is ~273 ms faster on average
- This equals about 2.2% lower latency
- However, it also has slightly higher jitter (larger std deviation)
Bottom Line
Both endpoints perform similarly, but full.consensus.mocha-4 is marginally faster while grpc-mocha.pops.one is slightly more stable.
Celestia Blob Submit Benchmark Comparison
Runs: 20 submissions each
Command:
celestia blob submit 0x676d 0x676d
Average Latency
Min / Max Latencies
Run 1: min 11,520 ms · max 19,713 ms
Run 2: min 10,654 ms · max 58,556 ms
Summary
Typical per-submission latency (11–13s) is similar in both runs.
The primary difference is consistency: