111 changes: 111 additions & 0 deletions bin/reth-bench/scripts/run_proof_benchmarks.sh
@@ -0,0 +1,111 @@
#!/bin/bash
# Run proof generation benchmarks and save results
#
# Usage:
# ./run_proof_benchmarks.sh # Run all benchmarks
# ./run_proof_benchmarks.sh --quick # Run with fewer samples (faster)
# ./run_proof_benchmarks.sh --baseline # Save results as the baseline
# ./run_proof_benchmarks.sh --compare # Compare against the saved baseline

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" # Script lives in bin/reth-bench/scripts/, three levels below the repo root

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}Proof Generation Benchmark Runner${NC}"
echo -e "${GREEN}========================================${NC}"
echo

# Check if cargo is available
if ! command -v cargo &> /dev/null; then
echo -e "${RED}Error: cargo not found${NC}"
exit 1
fi

cd "$PROJECT_ROOT"

# Parse arguments
QUICK_MODE=""
BASELINE_MODE=""
COMPARE_MODE=""

for arg in "$@"; do
case $arg in
--quick)
QUICK_MODE="--quick"
echo -e "${YELLOW}Quick mode enabled (fewer samples)${NC}"
;;
--baseline)
BASELINE_MODE="--save-baseline"
echo -e "${YELLOW}Saving results as baseline${NC}"
;;
--compare)
COMPARE_MODE="--baseline"
echo -e "${YELLOW}Comparing against baseline${NC}"
;;
*)
# Ignore unknown options
;;
esac
done

echo -e "${GREEN}Building benchmark...${NC}"
cargo build --release --package reth-trie-parallel --benches

echo
echo -e "${GREEN}Running benchmarks...${NC}"
echo -e "${YELLOW}This will measure:${NC}"
echo " - Single storage proof generation time"
echo " - Account multiproof generation (parallel)"
echo " - MDBX trie node read latency"
echo " - Detailed timing breakdowns"
echo

# Run the benchmark. Criterion flags (--quick, --save-baseline, --baseline) must
# come after the `--` separator, otherwise cargo itself tries to parse them.
if [ -n "$BASELINE_MODE" ]; then
cargo bench --package reth-trie-parallel --bench proof_generation -- $QUICK_MODE --save-baseline proof_baseline
elif [ -n "$COMPARE_MODE" ]; then
cargo bench --package reth-trie-parallel --bench proof_generation -- $QUICK_MODE --baseline proof_baseline
else
cargo bench --package reth-trie-parallel --bench proof_generation -- $QUICK_MODE
fi

echo
echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}Benchmark Results${NC}"
echo -e "${GREEN}========================================${NC}"
echo
echo "Results saved to: target/criterion/"
echo
echo "To view detailed HTML reports:"
echo " open target/criterion/report/index.html"
echo
echo "To compare against baseline:"
echo " ./run_proof_benchmarks.sh --compare"
echo

# Extract and display key metrics if criterion output exists
SUMMARY_JSON="target/criterion/state_root_mdbx_timing/incremental_root/small_100_accounts/new/estimates.json"
if [ -f "$SUMMARY_JSON" ]; then
echo -e "${GREEN}Quick Summary (incremental root, 100 accounts):${NC}"

# Use jq if available, otherwise skip
if command -v jq &> /dev/null; then
MEAN=$(jq -r '.mean.point_estimate' "$SUMMARY_JSON")
# Convert to ms (criterion stores estimates in ns)
MEAN_MS=$(echo "scale=3; $MEAN / 1000000" | bc)
echo " Mean time: ${MEAN_MS}ms"
else
echo " (Install 'jq' for summary statistics)"
fi
fi

echo
echo -e "${GREEN}Done!${NC}"
6 changes: 6 additions & 0 deletions crates/trie/parallel/Cargo.toml
@@ -46,6 +46,8 @@ reth-primitives-traits.workspace = true
reth-provider = { workspace = true, features = ["test-utils"] }
reth-trie-db.workspace = true
reth-trie = { workspace = true, features = ["test-utils"] }
reth-db.workspace = true
reth-db-common.workspace = true

# misc
rand.workspace = true
@@ -69,3 +71,7 @@ test-utils = [
[[bench]]
name = "root"
harness = false

[[bench]]
name = "proof_generation"
harness = false
205 changes: 205 additions & 0 deletions crates/trie/parallel/benches/proof_generation.rs
@@ -0,0 +1,205 @@
//! Benchmark for measuring MDBX read latency during state root calculation.
//!
//! This measures the time to calculate state roots, which involves:
//! - Reading trie nodes from MDBX
//! - Generating proofs internally
//! - Walking the trie structure
//!
//! Run with: cargo bench --package reth-trie-parallel --bench proof_generation
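//!
//! Criterion flags go after the `--` separator. For example, to save a baseline and
//! later compare against it (the companion run_proof_benchmarks.sh script wraps this):
//!
//! cargo bench --package reth-trie-parallel --bench proof_generation -- --save-baseline proof_baseline
//! cargo bench --package reth-trie-parallel --bench proof_generation -- --baseline proof_baseline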

#![allow(missing_docs)]

use alloy_primitives::{B256, U256};
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, SamplingMode,
};
use proptest::{collection::hash_map, prelude::*, strategy::ValueTree, test_runner::TestRunner};
use proptest_arbitrary_interop::arb;
use reth_primitives_traits::Account;
use reth_provider::{test_utils::create_test_provider_factory, StateWriter, TrieWriter};
use reth_trie::{
hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot,
};
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot};
use std::time::Instant;

/// Generate test data with accounts and storage
fn generate_test_data(num_accounts: usize, storage_slots: usize) -> HashedPostState {
let mut runner = TestRunner::deterministic();

let db_state = hash_map(
any::<B256>(),
(
arb::<Account>().prop_filter("non empty account", |a| !a.is_empty()),
hash_map(
any::<B256>(),
any::<U256>().prop_filter("non zero value", |v| !v.is_zero()),
storage_slots,
),
),
num_accounts,
)
.new_tree(&mut runner)
.unwrap()
.current();
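// db_state maps hashed address -> (account, hashed storage slots).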

HashedPostState::default()
.with_accounts(db_state.iter().map(|(address, (account, _))| (*address, Some(*account))))
.with_storages(
db_state
.into_iter()
.map(|(address, (_, storage))| (address, HashedStorage::from_iter(false, storage))),
)
}

/// Benchmark state root calculation with varying complexity
/// This internally performs MDBX trie node reads
fn bench_state_root_mdbx_reads(c: &mut Criterion) {
let mut group = c.benchmark_group("state_root_mdbx_timing");
group.sampling_mode(SamplingMode::Flat);
group.sample_size(20);

// Test different scales to measure MDBX read performance
let scenarios = vec![
("small_100_accounts", 100, 10),
("medium_500_accounts", 500, 10),
("large_1000_accounts", 1000, 20),
];

for (name, num_accounts, storage_slots) in scenarios {
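// The full state seeds the DB; the storages of half the accounts are re-applied
// as the "update" so the incremental benchmark has real work against the seeded DB.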
let (db_state, updated_state) = {
let full_state = generate_test_data(num_accounts, storage_slots);
let keys: Vec<_> = full_state.accounts.keys().copied().collect();
let update_keys: Vec<_> = keys.iter().take(num_accounts / 2).copied().collect();

let db_state = full_state.clone();
let mut updated_state = HashedPostState::default();

for key in update_keys {
if let Some(storage) = db_state.storages.get(&key) {
updated_state.storages.insert(key, storage.clone());
}
}

(db_state, updated_state)
};

let factory = create_test_provider_factory();
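// Seed MDBX: write the hashed state, compute the initial root, and persist the
// resulting trie updates so benchmark iterations read trie nodes back from the DB.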
{
let provider_rw = factory.provider_rw().unwrap();
provider_rw.write_hashed_state(&db_state.into_sorted()).unwrap();
let (_, updates) =
StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap();
provider_rw.write_trie_updates(updates).unwrap();
provider_rw.commit().unwrap();
}

group.bench_function(BenchmarkId::new("increm ental_root", name), |b| {
b.iter_with_setup(
|| {
let sorted_state = updated_state.clone().into_sorted();
let prefix_sets = updated_state.construct_prefix_sets().freeze();
let provider = factory.provider().unwrap();
(provider, sorted_state, prefix_sets)
},
|(provider, sorted_state, prefix_sets)| {
let start = Instant::now();

// This internally performs MDBX trie node reads
let hashed_cursor_factory = HashedPostStateCursorFactory::new(
DatabaseHashedCursorFactory::new(provider.tx_ref()),
&sorted_state,
);
let _root = StateRoot::from_tx(provider.tx_ref())
.with_hashed_cursor_factory(hashed_cursor_factory)
.with_prefix_sets(prefix_sets)
.root()
.expect("failed to compute root");

let elapsed = start.elapsed();
black_box(elapsed)
},
)
});

// Also bench full root (more MDBX reads)
group.bench_function(BenchmarkId::new("full_root", name), |b| {
b.iter_with_setup(
|| factory.provider().unwrap(),
|provider| {
let start = Instant::now();

let _root =
StateRoot::from_tx(provider.tx_ref()).root().expect("failed to compute root");

let elapsed = start.elapsed();
black_box(elapsed)
},
)
});
}

group.finish();
}

/// Benchmark with different update sizes to see MDBX read scaling
fn bench_update_sizes(c: &mut Criterion) {
let mut group = c.benchmark_group("update_size_scaling");
group.sampling_mode(SamplingMode::Flat);
group.sample_size(20);

let base_state = generate_test_data(1000, 10);
let factory = create_test_provider_factory();
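// Seed MDBX once; every update size below is measured against this same base state.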
{
let provider_rw = factory.provider_rw().unwrap();
provider_rw.write_hashed_state(&base_state.clone().into_sorted()).unwrap();
let (_, updates) = StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap();
provider_rw.write_trie_updates(updates).unwrap();
provider_rw.commit().unwrap();
}

// Test different update sizes
for update_size in [10, 50, 100, 200] {
let keys: Vec<_> = base_state.accounts.keys().copied().collect();
let update_keys: Vec<_> = keys.iter().take(update_size).copied().collect();

let mut updated_state = HashedPostState::default();
for key in update_keys {
if let Some(storage) = base_state.storages.get(&key) {
updated_state.storages.insert(key, storage.clone());
}
}

group.bench_function(BenchmarkId::new("accounts_updated", update_size), |b| {
b.iter_with_setup(
|| {
let sorted_state = updated_state.clone().into_sorted();
let prefix_sets = updated_state.construct_prefix_sets().freeze();
let provider = factory.provider().unwrap();
(provider, sorted_state, prefix_sets)
},
|(provider, sorted_state, prefix_sets)| {
let start = Instant::now();

let hashed_cursor_factory = HashedPostStateCursorFactory::new(
DatabaseHashedCursorFactory::new(provider.tx_ref()),
&sorted_state,
);
let _root = StateRoot::from_tx(provider.tx_ref())
.with_hashed_cursor_factory(hashed_cursor_factory)
.with_prefix_sets(prefix_sets)
.root()
.expect("failed to compute root");

black_box(start.elapsed())
},
)
});
}

group.finish();
}

criterion_group!(benches, bench_state_root_mdbx_reads, bench_update_sizes);
criterion_main!(benches);