diff --git a/.claude/skills/test-pr-devnet/SKILL.md b/.claude/skills/test-pr-devnet/SKILL.md new file mode 100644 index 0000000..4f3829b --- /dev/null +++ b/.claude/skills/test-pr-devnet/SKILL.md @@ -0,0 +1,260 @@ +--- +name: test-pr-devnet +description: Test ethlambda PR changes in multi-client devnet. Use when users want to (1) Test a branch/PR with other Lean clients, (2) Validate BlocksByRoot or P2P protocol changes, (3) Test sync recovery with pause/unpause, (4) Verify cross-client interoperability, (5) Run integration tests before merging. +disable-model-invocation: true +--- + +# Test PR in Devnet + +Test ethlambda branch changes in a multi-client local devnet with zeam (Zig), ream (Rust), qlean (C++), and ethlambda. + +## Quick Start + +```bash +# Test current branch (basic interoperability, ~60-90s) +.claude/skills/test-pr-devnet/scripts/test-branch.sh + +# Test with sync recovery (BlocksByRoot validation, ~90-120s) +.claude/skills/test-pr-devnet/scripts/test-branch.sh --with-sync-test + +# Test specific branch +.claude/skills/test-pr-devnet/scripts/test-branch.sh my-feature-branch + +# Check status while running +.claude/skills/test-pr-devnet/scripts/check-status.sh + +# Cleanup when done +.claude/skills/test-pr-devnet/scripts/cleanup.sh +``` + +## What It Does + +1. **Builds branch-specific Docker image** tagged as `ghcr.io/lambdaclass/ethlambda:` +2. **Updates lean-quickstart config** to use new image (backs up original) +3. **Starts 4-node devnet** with fresh genesis (zeam, ream, qlean, ethlambda) +4. **Optionally tests sync recovery** by pausing/unpausing nodes +5. **Analyzes results** and provides summary +6. 
**Leaves devnet running** for manual inspection + +## Prerequisites + +| Requirement | Location | Check | +|-------------|----------|-------| +| lean-quickstart | `/Users/mega/lean_consensus/lean-quickstart` | `ls $LEAN_QUICKSTART` | +| Docker running | - | `docker ps` | +| Git repository | ethlambda root | `git branch` | + +## Test Scenarios + +### Basic Interoperability (~60-90s) + +**Goal:** Verify ethlambda produces blocks and reaches consensus with other clients + +**Success criteria:** +- ✅ No errors in ethlambda logs +- ✅ All 4 nodes at same head slot +- ✅ Finalization advancing (every 6-12 slots) +- ✅ Each validator produces blocks for their slots + +### Sync Recovery (~90-120s) + +**Goal:** Test BlocksByRoot request/response when nodes fall behind + +**Usage:** Add `--with-sync-test` flag + +**What happens:** +1. Devnet runs for 10s (~2-3 slots) +2. Pauses `zeam_0` and `qlean_0` +3. Network progresses 20s (~5 slots) +4. Unpauses nodes → nodes sync + +**Success criteria:** +- ✅ Inbound BlocksByRoot requests logged +- ✅ Successful responses sent +- ✅ Paused nodes sync to current head + +## Configuration Changes + +The skill modifies `lean-quickstart/client-cmds/ethlambda-cmd.sh` to use your branch's Docker image. + +**Automatic backup:** Creates `ethlambda-cmd.sh.backup` + +**Restore methods:** +```bash +# 1. Cleanup script (recommended) +.claude/skills/test-pr-devnet/scripts/cleanup.sh + +# 2. Manual restore +mv $LEAN_QUICKSTART/client-cmds/ethlambda-cmd.sh.backup \ + $LEAN_QUICKSTART/client-cmds/ethlambda-cmd.sh + +# 3. Git restore (if no uncommitted changes) +cd $LEAN_QUICKSTART && git checkout client-cmds/ethlambda-cmd.sh +``` + +## Manual Workflow (Alternative to Script) + +If you need fine-grained control: + +### 1. 
Build Image + +```bash +cd /Users/mega/lean_consensus/ethlambda +BRANCH=$(git rev-parse --abbrev-ref HEAD) +docker build \ + --build-arg GIT_COMMIT=$(git rev-parse HEAD) \ + --build-arg GIT_BRANCH=$BRANCH \ + -t ghcr.io/lambdaclass/ethlambda:$BRANCH . +``` + +### 2. Update Configuration + +Edit `$LEAN_QUICKSTART/client-cmds/ethlambda-cmd.sh` line 17: +```bash +node_docker="ghcr.io/lambdaclass/ethlambda: \ +``` + +### 3. Start Devnet + +```bash +cd $LEAN_QUICKSTART +NETWORK_DIR=local-devnet ./spin-node.sh --node all --generateGenesis --metrics +``` + +### 4. Test Sync (Optional) + +```bash +# Create sync gap +docker pause zeam_0 qlean_0 +sleep 20 # Network progresses + +# Test recovery +docker unpause zeam_0 qlean_0 +sleep 10 # Wait for sync +``` + +### 5. Check Results + +```bash +# Quick status +.claude/skills/test-pr-devnet/scripts/check-status.sh + +# Detailed analysis (use devnet-log-review skill in lean-quickstart) +cd $LEAN_QUICKSTART +.claude/skills/devnet-log-review/scripts/analyze-logs.sh +``` + +## Protocol Compatibility + +| Client | Status | Gossipsub | BlocksByRoot | +|--------|--------|-----------|--------------| +| ream | ✅ Full | ✅ Full | ✅ Full | +| zeam | ✅ Full | ✅ Full | ⚠️ Limited | +| qlean | ✅ Full | ✅ Full | ⚠️ Limited | +| ethlambda | ✅ Full | ✅ Full | ✅ Full | + +**Notes:** +- zeam/qlean BlocksByRoot errors are expected (not a blocker) +- ream ↔ ethlambda BlocksByRoot should work perfectly +- All clients use Gossipsub for block propagation + +## Verification Checklist + +| Check | Command | Expected | +|-------|---------|----------| +| All nodes running | `docker ps --filter "name=_0"` | 4 containers | +| Peers connected | `docker logs ethlambda_0 \| grep "Received status request" \| wc -l` | > 10 | +| Blocks produced | `docker logs ethlambda_0 \| grep "Published block" \| wc -l` | > 0 | +| No errors | `docker logs ethlambda_0 \| grep ERROR \| wc -l` | 0 | + +## Troubleshooting + +### Build Fails +```bash +docker ps # Check Docker running 
+docker system prune -a # Clean cache if needed +``` + +### Nodes Won't Start +```bash +# Clean and retry +docker stop zeam_0 ream_0 qlean_0 ethlambda_0 2>/dev/null +docker rm zeam_0 ream_0 qlean_0 ethlambda_0 2>/dev/null +cd $LEAN_QUICKSTART +NETWORK_DIR=local-devnet ./spin-node.sh --node all --generateGenesis +``` + +### Genesis Mismatch +```bash +cd $LEAN_QUICKSTART +NETWORK_DIR=local-devnet ./spin-node.sh --node all --cleanData --generateGenesis +``` + +### Image Tag Not Updated +```bash +# Verify the change +grep "node_docker=" $LEAN_QUICKSTART/client-cmds/ethlambda-cmd.sh +# Should show your branch name, not :local +``` + +### Port Already in Use +```bash +docker stop $(docker ps -q --filter "name=_0") 2>/dev/null || true +``` + +## Debugging + +### P2P Request/Response Debugging + +```bash +# Check inbound BlocksByRoot handling +docker logs ethlambda_0 2>&1 | grep "Received BlocksByRoot request" +docker logs ethlambda_0 2>&1 | grep "Responding to BlocksByRoot" + +# Check outbound BlocksByRoot requests +docker logs ethlambda_0 2>&1 | grep "Sending BlocksByRoot request" +docker logs ethlambda_0 2>&1 | grep "Received BlocksByRoot response" + +# Check for protocol errors +docker logs ethlambda_0 2>&1 | grep -E "Outbound request failed|protocol.*not.*support" + +# Count requests/responses +docker logs ethlambda_0 2>&1 | grep "Received BlocksByRoot request" | wc -l +``` + +### Devnet Status Checks + +```bash +# Check all nodes are running +docker ps --format "{{.Names}}: {{.Status}}" --filter "name=_0" + +# Get current chain status (zeam) +docker logs zeam_0 2>&1 | tail -100 | grep "CHAIN STATUS" | tail -1 + +# Get fork choice updates (ethlambda) +docker logs ethlambda_0 2>&1 | grep "Fork choice head updated" | tail -5 + +# Check peer connectivity +docker logs ethlambda_0 2>&1 | grep "Received status request" | wc -l +``` + +### Common Investigation Patterns + +```bash +# Verify ethlambda is proposing blocks +docker logs ethlambda_0 2>&1 | grep "We are the 
proposer" + +# Compare finalized slots across clients +for node in zeam_0 ream_0 ethlambda_0; do + echo "$node:" + docker logs "$node" 2>&1 | grep -i "finalized" | tail -1 +done + +# Check peer discovery +docker logs ethlambda_0 2>&1 | grep -i "peer\|connection" | head -20 +``` + +## References + +- **[ethlambda CLAUDE.md](../../CLAUDE.md)** - Development workflow, detailed debugging commands +- **[lean-quickstart devnet-log-review](../../../lean-quickstart/.claude/skills/devnet-log-review/SKILL.md)** - Comprehensive log analysis diff --git a/.claude/skills/test-pr-devnet/scripts/check-status.sh b/.claude/skills/test-pr-devnet/scripts/check-status.sh new file mode 100755 index 0000000..f04cc6b --- /dev/null +++ b/.claude/skills/test-pr-devnet/scripts/check-status.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Quick devnet status check + +# Colors +GREEN='\033[0;32m' +RED='\033[0;31m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo -e "${BLUE}=== Devnet Status ===${NC}" +echo "" + +# Check running nodes +echo "Running nodes:" +docker ps --format " {{.Names}}: {{.Status}}" --filter "name=_0" +echo "" + +# Check each node's latest status +for node in zeam_0 ream_0 qlean_0 ethlambda_0; do + if docker ps --format "{{.Names}}" | grep -q "^$node$"; then + echo -e "${GREEN}$node${NC}:" + + case $node in + zeam_0) + docker logs zeam_0 2>&1 | tail -100 | grep "CHAIN STATUS" | tail -1 | sed 's/^/ /' + ;; + ethlambda_0) + docker logs ethlambda_0 2>&1 | grep "Fork choice head updated" | tail -1 | sed 's/^/ /' + ;; + *) + echo " (check logs manually)" + ;; + esac + echo "" + fi +done + +# Check peer connectivity +if docker ps --format "{{.Names}}" | grep -q "^ethlambda_0$"; then + PEERS=$(docker logs ethlambda_0 2>&1 | grep "Received status request" | wc -l | tr -d ' ') + echo "ethlambda peer interactions: $PEERS" + echo "" +fi + +# Quick error check +echo "Error counts:" +for node in zeam_0 ream_0 qlean_0 ethlambda_0; do + if docker ps --format "{{.Names}}" | grep -q "^$node$"; then + 
COUNT=$(docker logs "$node" 2>&1 | grep -c "ERROR" || echo "0") + if [[ "$COUNT" -eq 0 ]]; then + echo -e " $node: ${GREEN}$COUNT${NC}" + else + echo -e " $node: ${RED}$COUNT${NC}" + fi + fi +done diff --git a/.claude/skills/test-pr-devnet/scripts/cleanup.sh b/.claude/skills/test-pr-devnet/scripts/cleanup.sh new file mode 100755 index 0000000..f664d3f --- /dev/null +++ b/.claude/skills/test-pr-devnet/scripts/cleanup.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Cleanup devnet and restore configurations + +LEAN_QUICKSTART="${LEAN_QUICKSTART:-/Users/mega/lean_consensus/lean-quickstart}" +ETHLAMBDA_CMD="$LEAN_QUICKSTART/client-cmds/ethlambda-cmd.sh" + +# Colors +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo -e "${BLUE}=== Devnet Cleanup ===${NC}" +echo "" + +# Stop devnet +echo "Stopping devnet..." +cd "$LEAN_QUICKSTART" +NETWORK_DIR=local-devnet ./spin-node.sh --node all --stop 2>/dev/null || true + +# Force remove containers +echo "Removing containers..." +docker rm -f zeam_0 ream_0 qlean_0 ethlambda_0 2>/dev/null || true + +echo -e "${GREEN}✓ Devnet stopped${NC}" +echo "" + +# Restore config if backup exists +if [[ -f "$ETHLAMBDA_CMD.backup" ]]; then + echo "Restoring ethlambda-cmd.sh..." + mv "$ETHLAMBDA_CMD.backup" "$ETHLAMBDA_CMD" + echo -e "${GREEN}✓ Config restored${NC}" +else + echo "No backup found, skipping config restore" +fi + +echo "" +echo "Cleanup complete!" diff --git a/.claude/skills/test-pr-devnet/scripts/test-branch.sh b/.claude/skills/test-pr-devnet/scripts/test-branch.sh new file mode 100755 index 0000000..d768dcf --- /dev/null +++ b/.claude/skills/test-pr-devnet/scripts/test-branch.sh @@ -0,0 +1,264 @@ +#!/bin/bash +set -euo pipefail + +# Test ethlambda branch in multi-client devnet +# Usage: ./test-branch.sh [branch-name] [--with-sync-test] + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ETHLAMBDA_ROOT="$(cd "$SCRIPT_DIR/../../.." 
&& pwd)" +LEAN_QUICKSTART="${LEAN_QUICKSTART:-/Users/mega/lean_consensus/lean-quickstart}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Parse arguments +BRANCH_NAME="" +WITH_SYNC_TEST=false + +# First positional arg is branch name (if not a flag) +for arg in "$@"; do + if [[ "$arg" == "--with-sync-test" ]]; then + WITH_SYNC_TEST=true + elif [[ -z "$BRANCH_NAME" ]]; then + BRANCH_NAME="$arg" + fi +done + +# Default to current branch if not specified +if [[ -z "$BRANCH_NAME" ]]; then + BRANCH_NAME=$(git -C "$ETHLAMBDA_ROOT" rev-parse --abbrev-ref HEAD) +fi + +echo -e "${BLUE}=== ethlambda Devnet Testing ===${NC}" +echo "" +echo "Branch: $BRANCH_NAME" +echo "Sync test: $WITH_SYNC_TEST" +echo "ethlambda root: $ETHLAMBDA_ROOT" +echo "lean-quickstart: $LEAN_QUICKSTART" +echo "" + +# Validate prerequisites +echo "Validating prerequisites..." + +if [[ ! -d "$LEAN_QUICKSTART" ]]; then + echo -e "${RED}✗ Error: lean-quickstart not found at $LEAN_QUICKSTART${NC}" + echo " Set LEAN_QUICKSTART environment variable or clone it:" + echo " git clone https://github.com/blockblaz/lean-quickstart.git" + exit 1 +fi + +if [[ ! -f "$LEAN_QUICKSTART/spin-node.sh" ]]; then + echo -e "${RED}✗ Error: spin-node.sh not found in lean-quickstart${NC}" + exit 1 +fi + +if ! docker info &>/dev/null; then + echo -e "${RED}✗ Error: Docker is not running${NC}" + echo " Start Docker Desktop or docker daemon" + exit 1 +fi + +if [[ ! 
-d "$ETHLAMBDA_ROOT/.git" ]]; then + echo -e "${RED}✗ Error: Not in a git repository${NC}" + echo " Run this script from ethlambda repository root" + exit 1 +fi + +echo -e "${GREEN}✓ Prerequisites validated${NC}" +echo "" + +# Step 1: Build Docker image +echo -e "${BLUE}[1/6] Building Docker image...${NC}" +cd "$ETHLAMBDA_ROOT" +GIT_COMMIT=$(git rev-parse HEAD) + +docker build \ + --build-arg GIT_COMMIT="$GIT_COMMIT" \ + --build-arg GIT_BRANCH="$BRANCH_NAME" \ + -t "ghcr.io/lambdaclass/ethlambda:$BRANCH_NAME" \ + . + +echo -e "${GREEN}✓ Image built: ghcr.io/lambdaclass/ethlambda:$BRANCH_NAME${NC}" +echo "" + +# Step 2: Update ethlambda-cmd.sh +echo -e "${BLUE}[2/6] Updating lean-quickstart config...${NC}" +ETHLAMBDA_CMD="$LEAN_QUICKSTART/client-cmds/ethlambda-cmd.sh" + +# Backup original +cp "$ETHLAMBDA_CMD" "$ETHLAMBDA_CMD.backup" + +# Update docker tag +sed -i.tmp "s|ghcr.io/lambdaclass/ethlambda:[^ ]*|ghcr.io/lambdaclass/ethlambda:$BRANCH_NAME|" "$ETHLAMBDA_CMD" +rm "$ETHLAMBDA_CMD.tmp" + +echo -e "${GREEN}✓ Updated $ETHLAMBDA_CMD${NC}" +echo " (Backup saved as $ETHLAMBDA_CMD.backup)" +echo "" + +# Step 3: Stop any existing devnet +echo -e "${BLUE}[3/6] Cleaning up existing devnet...${NC}" +cd "$LEAN_QUICKSTART" +NETWORK_DIR=local-devnet ./spin-node.sh --node all --stop 2>/dev/null || true +docker rm -f zeam_0 ream_0 qlean_0 ethlambda_0 2>/dev/null || true + +echo -e "${GREEN}✓ Cleanup complete${NC}" +echo "" + +# Step 4: Start devnet +echo -e "${BLUE}[4/6] Starting devnet...${NC}" +echo "This will take ~40 seconds (genesis generation + startup)" +echo "" + +# Run devnet in background +NETWORK_DIR=local-devnet ./spin-node.sh --node all --generateGenesis --metrics > /tmp/devnet-$BRANCH_NAME.log 2>&1 & +DEVNET_PID=$! + +# Wait for nodes to start (check docker ps) +echo -n "Waiting for nodes to start" +for i in {1..40}; do + sleep 1 + echo -n "." 
+ if [[ $(docker ps --filter "name=_0" --format "{{.Names}}" | wc -l) -eq 4 ]]; then + echo "" + echo -e "${GREEN}✓ All 4 nodes running${NC}" + break + fi +done +echo "" + +# Show node status +docker ps --format " {{.Names}}: {{.Status}}" --filter "name=_0" +echo "" + +# Step 5: Sync recovery test (optional) +if [[ "$WITH_SYNC_TEST" == "true" ]]; then + echo -e "${BLUE}[5/6] Testing sync recovery...${NC}" + + # Let devnet run for a bit + echo "Letting devnet run for 10 seconds..." + sleep 10 + + # Pause nodes + echo "Pausing zeam_0 and qlean_0..." + docker pause zeam_0 qlean_0 + echo -e "${YELLOW}⏸ Nodes paused${NC}" + + # Wait for network to progress + echo "Network progressing for 20 seconds (~5 slots)..." + sleep 20 + + # Unpause + echo "Unpausing nodes..." + docker unpause zeam_0 qlean_0 + echo -e "${GREEN}▶ Nodes resumed${NC}" + + # Wait for sync + echo "Waiting 10 seconds for sync recovery..." + sleep 10 + + echo -e "${GREEN}✓ Sync recovery test complete${NC}" + echo "" +else + echo -e "${BLUE}[5/6] Skipping sync recovery test${NC}" + echo "Use --with-sync-test to enable" + echo "" + + # Just let it run for a bit + echo "Letting devnet run for 30 seconds..." 
+ sleep 30 +fi + +# Step 6: Analyze results +echo -e "${BLUE}[6/6] Analyzing results...${NC}" +echo "" + +# Quick status check +echo "=== Quick Status ===" +echo "" + +# Check each node +for node in zeam_0 ream_0 qlean_0 ethlambda_0; do + if docker ps --format "{{.Names}}" | grep -q "^$node$"; then + echo -e "${GREEN}✓${NC} $node: Running" + else + echo -e "${RED}✗${NC} $node: Not running" + fi +done +echo "" + +# Check ethlambda specifics +echo "=== ethlambda Status ===" +echo "" + +# Get latest head +LATEST_HEAD=$(docker logs ethlambda_0 2>&1 | grep "Fork choice head updated" | tail -1 || echo "No head updates found") +echo "$LATEST_HEAD" +echo "" + +# Count peer interactions +PEER_COUNT=$(docker logs ethlambda_0 2>&1 | grep "Received status request" | wc -l | tr -d ' ') +echo "Peer interactions: $PEER_COUNT" + +# Count blocks +BLOCKS_PUBLISHED=$(docker logs ethlambda_0 2>&1 | grep "Published block" | wc -l | tr -d ' ') +echo "Blocks published: $BLOCKS_PUBLISHED" + +# Count errors +ERROR_COUNT=$(docker logs ethlambda_0 2>&1 | grep -c "ERROR" || echo "0") +if [[ "$ERROR_COUNT" -eq 0 ]]; then + echo -e "Errors: ${GREEN}$ERROR_COUNT${NC}" +else + echo -e "Errors: ${RED}$ERROR_COUNT${NC}" +fi +echo "" + +# BlocksByRoot stats (if sync test was run) +if [[ "$WITH_SYNC_TEST" == "true" ]]; then + echo "=== BlocksByRoot Activity ===" + echo "" + + INBOUND=$(docker logs ethlambda_0 2>&1 | grep "Received BlocksByRoot request" | wc -l | tr -d ' ') + RESPONSES=$(docker logs ethlambda_0 2>&1 | grep "Responding to BlocksByRoot" | wc -l | tr -d ' ') + OUTBOUND=$(docker logs ethlambda_0 2>&1 | grep "Sending BlocksByRoot request" | wc -l | tr -d ' ') + + echo "Inbound requests: $INBOUND" + echo "Responses sent: $RESPONSES" + echo "Outbound requests: $OUTBOUND" + echo "" +fi + +# Final verdict +echo "=== Test Result ===" +echo "" +if [[ "$ERROR_COUNT" -eq 0 ]] && [[ "$PEER_COUNT" -gt 0 ]]; then + echo -e "${GREEN}✓ PASSED${NC} - Devnet running successfully" +else + echo -e 
"${YELLOW}⚠ CHECK LOGS${NC} - Some issues detected" +fi +echo "" + +# Next steps +echo "=== Next Steps ===" +echo "" +echo "Check detailed logs:" +echo " docker logs ethlambda_0 2>&1 | less" +echo "" +echo "Run log analysis:" +echo " cd $LEAN_QUICKSTART" +echo " .claude/skills/devnet-log-review/scripts/analyze-logs.sh" +echo "" +echo "Stop devnet:" +echo " cd $LEAN_QUICKSTART" +echo " NETWORK_DIR=local-devnet ./spin-node.sh --node all --stop" +echo "" +echo "Restore config:" +echo " mv $ETHLAMBDA_CMD.backup $ETHLAMBDA_CMD" +echo "" + +# Keep devnet running +echo -e "${YELLOW}Devnet is still running. Stop it when done testing.${NC}" diff --git a/CLAUDE.md b/CLAUDE.md index dbe0095..20951cb 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -71,16 +71,47 @@ make test # All tests + forkchoice (with skip ### Common Operations ```bash -make run-devnet # Docker build → lean-quickstart local devnet -rm -rf leanSpec && make leanSpec/fixtures # Regenerate test fixtures (requires uv) +.claude/skills/test-pr-devnet/scripts/test-branch.sh # Test branch in multi-client devnet +rm -rf leanSpec && make leanSpec/fixtures # Regenerate test fixtures (requires uv) ``` -### Debugging +### Testing with Local Devnet - +See `.claude/skills/test-pr-devnet/SKILL.md` for multi-client devnet testing workflows. ## Important Patterns & Idioms +### Trait Implementations +```rust +// Prefer From/Into traits over custom from_x/to_x methods +impl From for ResponseCode { fn from(code: u8) -> Self { Self(code) } } +impl From for u8 { fn from(code: ResponseCode) -> Self { code.0 } } + +// Enables idiomatic .into() usage +let code: ResponseCode = byte.into(); +let byte: u8 = code.into(); +``` + +### Ownership for Large Structures +```rust +// Prefer taking ownership to avoid cloning large data (signatures ~3KB) +pub fn consume_signed_block(signed_block: SignedBlockWithAttestation) { ... 
} + +// Add .clone() at call site if needed - makes cost explicit +store.insert_signed_block(root, signed_block.clone()); +``` + +### Formatting Patterns +```rust +// Extract long arguments into variables so formatter can join lines +// Instead of: +batch.put_batch(Table::X, vec![(key, value)]).expect("msg"); + +// Prefer: +let entries = vec![(key, value)]; +batch.put_batch(Table::X, entries).expect("msg"); +``` + ### Metrics (RAII Pattern) ```rust // Timing guard automatically observes duration on drop @@ -189,6 +220,7 @@ actual_slot = finalized_slot + 1 + relative_index ```bash cargo test --workspace --release # All workspace tests cargo test -p ethlambda-blockchain --features skip-signature-verification --test forkchoice_spectests +cargo test -p ethlambda-blockchain --features skip-signature-verification --test forkchoice_spectests -- --test-threads=1 # Sequential ``` ## Common Gotchas @@ -197,6 +229,10 @@ cargo test -p ethlambda-blockchain --features skip-signature-verification --test - Tests require `skip-signature-verification` feature for performance - Crypto tests marked `#[ignore]` (slow leanVM operations) +### Storage Architecture +- Genesis block has no signatures - stored in Blocks table only, not BlockSignatures +- All other blocks must have entries in both tables + ### State Root Computation - Always computed via `tree_hash_root()` after full state transition - Must match proposer's pre-computed `block.state_root` diff --git a/crates/blockchain/src/store.rs b/crates/blockchain/src/store.rs index e9e252c..fc6114a 100644 --- a/crates/blockchain/src/store.rs +++ b/crates/blockchain/src/store.rs @@ -366,8 +366,8 @@ pub fn on_block( store.update_checkpoints(ForkCheckpoints::new(store.head(), justified, finalized)); } - // Store block and state - store.insert_block(block_root, block.clone()); + // Store signed block and state + store.insert_signed_block(block_root, signed_block.clone()); store.insert_state(block_root, post_state); // Process block body 
attestations and their signatures diff --git a/crates/common/types/src/block.rs b/crates/common/types/src/block.rs index 88bcf3c..658e239 100644 --- a/crates/common/types/src/block.rs +++ b/crates/common/types/src/block.rs @@ -129,6 +129,44 @@ pub struct BlockWithAttestation { pub proposer_attestation: Attestation, } +/// Stored block signatures and proposer attestation. +/// +/// This type stores the data needed to reconstruct a `SignedBlockWithAttestation` +/// when combined with a `Block` from the blocks table. +#[derive(Clone, Encode, Decode)] +pub struct BlockSignaturesWithAttestation { + /// The proposer's attestation for this block. + pub proposer_attestation: Attestation, + + /// The aggregated signatures for the block. + pub signatures: BlockSignatures, +} + +impl BlockSignaturesWithAttestation { + /// Create from a SignedBlockWithAttestation by consuming it. + /// + /// Takes ownership to avoid cloning large signature data. + pub fn from_signed_block(signed_block: SignedBlockWithAttestation) -> Self { + Self { + proposer_attestation: signed_block.message.proposer_attestation, + signatures: signed_block.signature, + } + } + + /// Reconstruct a SignedBlockWithAttestation given the block. + /// + /// Consumes self to avoid cloning large signature data. + pub fn to_signed_block(self, block: Block) -> SignedBlockWithAttestation { + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: self.proposer_attestation, + }, + signature: self.signatures, + } + } +} + /// The header of a block, containing metadata. /// /// Block headers summarize blocks without storing full content. 
The header diff --git a/crates/net/p2p/src/req_resp/codec.rs b/crates/net/p2p/src/req_resp/codec.rs index e67e2e1..1a151b1 100644 --- a/crates/net/p2p/src/req_resp/codec.rs +++ b/crates/net/p2p/src/req_resp/codec.rs @@ -7,8 +7,8 @@ use tracing::trace; use super::{ encoding::{decode_payload, write_payload}, messages::{ - BLOCKS_BY_ROOT_PROTOCOL_V1, BlocksByRootRequest, Request, Response, STATUS_PROTOCOL_V1, - Status, + BLOCKS_BY_ROOT_PROTOCOL_V1, BlocksByRootRequest, ErrorMessage, Request, Response, + ResponseCode, ResponsePayload, STATUS_PROTOCOL_V1, Status, }, }; @@ -62,50 +62,41 @@ impl libp2p::request_response::Codec for Codec { where T: AsyncRead + Unpin + Send, { - let mut result = 0_u8; - io.read_exact(std::slice::from_mut(&mut result)).await?; - - // TODO: move matching to ResponseResult impl - let result_code = match result { - 0 => super::messages::ResponseResult::Success, - 1 => super::messages::ResponseResult::InvalidRequest, - _ => { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("invalid result code: {}", result), - )); - } - }; + let mut result_byte = 0_u8; + io.read_exact(std::slice::from_mut(&mut result_byte)) + .await?; - // TODO: send errors to event loop when result != Success? 
- if result_code != super::messages::ResponseResult::Success { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "non-success result in response", - )); - } + let code = ResponseCode::from(result_byte); let payload = decode_payload(io).await?; + // For non-success responses, the payload contains an SSZ-encoded error message + if code != ResponseCode::SUCCESS { + let message = ErrorMessage::from_ssz_bytes(&payload).map_err(|err| { + io::Error::new( + io::ErrorKind::InvalidData, + format!("Invalid error message: {err:?}"), + ) + })?; + let error_str = String::from_utf8_lossy(&message).to_string(); + trace!(?code, %error_str, "Received error response"); + return Ok(Response::error(code, message)); + } + + // Success responses contain the actual data match protocol.as_ref() { STATUS_PROTOCOL_V1 => { let status = Status::from_ssz_bytes(&payload).map_err(|err| { io::Error::new(io::ErrorKind::InvalidData, format!("{err:?}")) })?; - Ok(Response::new( - result_code, - super::messages::ResponsePayload::Status(status), - )) + Ok(Response::success(ResponsePayload::Status(status))) } BLOCKS_BY_ROOT_PROTOCOL_V1 => { let block = SignedBlockWithAttestation::from_ssz_bytes(&payload).map_err(|err| { io::Error::new(io::ErrorKind::InvalidData, format!("{err:?}")) })?; - Ok(Response::new( - result_code, - super::messages::ResponsePayload::BlocksByRoot(block), - )) + Ok(Response::success(ResponsePayload::BlocksByRoot(block))) } _ => Err(io::Error::new( io::ErrorKind::InvalidData, @@ -142,14 +133,27 @@ impl libp2p::request_response::Codec for Codec { where T: AsyncWrite + Unpin + Send, { - // Send result byte - io.write_all(&[resp.result as u8]).await?; + match resp { + Response::Success { payload } => { + // Send success code (0) + io.write_all(&[ResponseCode::SUCCESS.into()]).await?; - let encoded = match &resp.payload { - super::messages::ResponsePayload::Status(status) => status.as_ssz_bytes(), - super::messages::ResponsePayload::BlocksByRoot(response) => 
response.as_ssz_bytes(), - }; + let encoded = match &payload { + ResponsePayload::Status(status) => status.as_ssz_bytes(), + ResponsePayload::BlocksByRoot(block) => block.as_ssz_bytes(), + }; - write_payload(io, &encoded).await + write_payload(io, &encoded).await + } + Response::Error { code, message } => { + // Send error code + io.write_all(&[code.into()]).await?; + + // Error messages are SSZ-encoded as List[byte, 256] + let encoded = message.as_ssz_bytes(); + + write_payload(io, &encoded).await + } + } } } diff --git a/crates/net/p2p/src/req_resp/handlers.rs b/crates/net/p2p/src/req_resp/handlers.rs index 336ffc2..698af2c 100644 --- a/crates/net/p2p/src/req_resp/handlers.rs +++ b/crates/net/p2p/src/req_resp/handlers.rs @@ -8,8 +8,8 @@ use ethlambda_types::block::SignedBlockWithAttestation; use ethlambda_types::primitives::TreeHash; use super::{ - BLOCKS_BY_ROOT_PROTOCOL_V1, BlocksByRootRequest, Request, Response, ResponsePayload, - ResponseResult, Status, + BLOCKS_BY_ROOT_PROTOCOL_V1, BlocksByRootRequest, Request, Response, ResponseCode, + ResponsePayload, Status, error_message, }; use crate::{ BACKOFF_MULTIPLIER, INITIAL_BACKOFF_MS, MAX_FETCH_RETRIES, P2PServer, PendingRequest, @@ -32,12 +32,18 @@ pub async fn handle_req_resp_message( handle_blocks_by_root_request(server, request, channel, peer).await; } }, - request_response::Message::Response { response, .. } => match response.payload { - ResponsePayload::Status(status) => { - handle_status_response(status, peer).await; - } - ResponsePayload::BlocksByRoot(blocks) => { - handle_blocks_by_root_response(server, blocks, peer).await; + request_response::Message::Response { response, .. 
} => match response { + Response::Success { payload } => match payload { + ResponsePayload::Status(status) => { + handle_status_response(status, peer).await; + } + ResponsePayload::BlocksByRoot(block) => { + handle_blocks_by_root_response(server, block, peer).await; + } + }, + Response::Error { code, message } => { + let error_str = String::from_utf8_lossy(&message); + warn!(%peer, ?code, %error_str, "Received error response"); } }, }, @@ -84,7 +90,7 @@ async fn handle_status_request( .req_resp .send_response( channel, - Response::new(ResponseResult::Success, ResponsePayload::Status(our_status)), + Response::success(ResponsePayload::Status(our_status)), ) .unwrap(); } @@ -94,21 +100,47 @@ async fn handle_status_response(status: Status, peer: PeerId) { } async fn handle_blocks_by_root_request( - _server: &mut P2PServer, + server: &mut P2PServer, request: BlocksByRootRequest, - _channel: request_response::ResponseChannel, + channel: request_response::ResponseChannel, peer: PeerId, ) { let num_roots = request.roots.len(); info!(%peer, num_roots, "Received BlocksByRoot request"); - // TODO: Implement signed block storage and send response chunks - // For now, we don't send any response (drop the channel) - // In a full implementation, we would: - // 1. Look up each requested block root - // 2. Send a response chunk for each found block - // 3. Each chunk contains: result byte + encoded SignedBlockWithAttestation - warn!(%peer, num_roots, "BlocksByRoot request received but block storage not implemented"); + // TODO: Support multiple blocks per request (currently only handles first root) + // The protocol supports up to 1024 roots, but our response type only holds one block. 
+ let Some(root) = request.roots.first() else { + debug!(%peer, "BlocksByRoot request with no roots"); + return; + }; + + match server.store.get_signed_block(root) { + Some(signed_block) => { + let slot = signed_block.message.block.slot; + info!(%peer, %root, %slot, "Responding to BlocksByRoot request"); + + if let Err(err) = server.swarm.behaviour_mut().req_resp.send_response( + channel, + Response::success(ResponsePayload::BlocksByRoot(signed_block)), + ) { + warn!(%peer, %root, ?err, "Failed to send BlocksByRoot response"); + } + } + None => { + debug!(%peer, %root, "Block not found for BlocksByRoot request"); + + if let Err(err) = server.swarm.behaviour_mut().req_resp.send_response( + channel, + Response::error( + ResponseCode::RESOURCE_UNAVAILABLE, + error_message("Block not found"), + ), + ) { + warn!(%peer, %root, ?err, "Failed to send RESOURCE_UNAVAILABLE response"); + } + } + } } async fn handle_blocks_by_root_response( diff --git a/crates/net/p2p/src/req_resp/messages.rs b/crates/net/p2p/src/req_resp/messages.rs index f5fc199..cc8ee1e 100644 --- a/crates/net/p2p/src/req_resp/messages.rs +++ b/crates/net/p2p/src/req_resp/messages.rs @@ -13,21 +13,76 @@ pub enum Request { } #[derive(Debug, Clone)] -pub struct Response { - pub result: ResponseResult, - pub payload: ResponsePayload, +#[allow(clippy::large_enum_variant)] +pub enum Response { + Success { + payload: ResponsePayload, + }, + Error { + code: ResponseCode, + message: ErrorMessage, + }, } impl Response { - pub fn new(result: ResponseResult, payload: ResponsePayload) -> Self { - Self { result, payload } + /// Create a success response with the given payload. + pub fn success(payload: ResponsePayload) -> Self { + Self::Success { payload } + } + + /// Create an error response with the given code and message. + pub fn error(code: ResponseCode, message: ErrorMessage) -> Self { + Self::Error { code, message } + } +} + +/// Response codes for req/resp protocol messages. 
+/// +/// The first byte of every response indicates success or failure: +/// - On success (code 0), the payload contains the requested data. +/// - On failure (codes 1-3), the payload contains an error message. +/// +/// Unknown codes are handled gracefully: +/// - Codes 4-127: Reserved for future use, treat as SERVER_ERROR. +/// - Codes 128-255: Invalid range, treat as INVALID_REQUEST. +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct ResponseCode(pub u8); + +impl ResponseCode { + /// Request completed successfully. Payload contains the response data. + pub const SUCCESS: Self = Self(0); + /// Request was malformed or violated protocol rules. + pub const INVALID_REQUEST: Self = Self(1); + /// Server encountered an internal error processing the request. + pub const SERVER_ERROR: Self = Self(2); + /// Requested resource (block, blob, etc.) is not available. + pub const RESOURCE_UNAVAILABLE: Self = Self(3); +} + +impl From for ResponseCode { + fn from(code: u8) -> Self { + Self(code) } } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum ResponseResult { - Success = 0, - InvalidRequest = 1, +impl From for u8 { + fn from(code: ResponseCode) -> Self { + code.0 + } +} + +impl std::fmt::Debug for ResponseCode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match *self { + Self::SUCCESS => write!(f, "SUCCESS(0)"), + Self::INVALID_REQUEST => write!(f, "INVALID_REQUEST(1)"), + Self::SERVER_ERROR => write!(f, "SERVER_ERROR(2)"), + Self::RESOURCE_UNAVAILABLE => write!(f, "RESOURCE_UNAVAILABLE(3)"), + // Unknown codes: treat 4-127 as SERVER_ERROR, 128-255 as INVALID_REQUEST + Self(code @ 4..=127) => write!(f, "SERVER_ERROR({code})"), + Self(code @ 128..=255) => write!(f, "INVALID_REQUEST({code})"), + } + } } #[derive(Debug, Clone)] @@ -46,9 +101,35 @@ pub struct Status { } type MaxRequestBlocks = typenum::U1024; +type MaxErrorMessageLength = typenum::U256; pub type RequestedBlockRoots = ssz_types::VariableList; +/// Error message type for 
non-success responses.
+/// SSZ-encoded as List[byte, 256] per spec.
+pub type ErrorMessage = ssz_types::VariableList<u8, MaxErrorMessageLength>;
+
+/// Helper to create an ErrorMessage from a string.
+/// Debug builds panic if message exceeds 256 bytes (programming error).
+/// Release builds truncate to 256 bytes.
+pub fn error_message(msg: impl AsRef<str>) -> ErrorMessage {
+    let bytes = msg.as_ref().as_bytes();
+    debug_assert!(
+        bytes.len() <= 256,
+        "Error message exceeds 256 byte protocol limit: {} bytes. Message: '{}'",
+        bytes.len(),
+        msg.as_ref()
+    );
+
+    let truncated = if bytes.len() > 256 {
+        &bytes[..256]
+    } else {
+        bytes
+    };
+
+    ErrorMessage::new(truncated.to_vec()).expect("error message fits in 256 bytes")
+}
+
 #[derive(Debug, Clone, Encode, Decode)]
 pub struct BlocksByRootRequest {
     pub roots: RequestedBlockRoots,
diff --git a/crates/net/p2p/src/req_resp/mod.rs b/crates/net/p2p/src/req_resp/mod.rs
index f78493f..fedd572 100644
--- a/crates/net/p2p/src/req_resp/mod.rs
+++ b/crates/net/p2p/src/req_resp/mod.rs
@@ -8,5 +8,5 @@ pub use encoding::MAX_COMPRESSED_PAYLOAD_SIZE;
 pub use handlers::{build_status, fetch_block_from_peer, handle_req_resp_message};
 pub use messages::{
     BLOCKS_BY_ROOT_PROTOCOL_V1, BlocksByRootRequest, Request, RequestedBlockRoots, Response,
-    ResponsePayload, ResponseResult, STATUS_PROTOCOL_V1, Status,
+    ResponseCode, ResponsePayload, STATUS_PROTOCOL_V1, Status, error_message,
 };
diff --git a/crates/storage/src/api/tables.rs b/crates/storage/src/api/tables.rs
index 0b99940..170176f 100644
--- a/crates/storage/src/api/tables.rs
+++ b/crates/storage/src/api/tables.rs
@@ -3,6 +3,11 @@
 pub enum Table {
     /// Block storage: H256 -> Block
     Blocks,
+    /// Block signatures storage: H256 -> BlockSignaturesWithAttestation
+    ///
+    /// Stored separately from blocks because the genesis block has no signatures.
+    /// All other blocks must have an entry in this table.
+ BlockSignatures, /// State storage: H256 -> State States, /// Known attestations: u64 -> AttestationData @@ -18,8 +23,9 @@ pub enum Table { } /// All table variants. -pub const ALL_TABLES: [Table; 7] = [ +pub const ALL_TABLES: [Table; 8] = [ Table::Blocks, + Table::BlockSignatures, Table::States, Table::LatestKnownAttestations, Table::LatestNewAttestations, diff --git a/crates/storage/src/backend/rocksdb.rs b/crates/storage/src/backend/rocksdb.rs index 72fb159..6790906 100644 --- a/crates/storage/src/backend/rocksdb.rs +++ b/crates/storage/src/backend/rocksdb.rs @@ -13,6 +13,7 @@ use std::sync::Arc; fn cf_name(table: Table) -> &'static str { match table { Table::Blocks => "blocks", + Table::BlockSignatures => "block_signatures", Table::States => "states", Table::LatestKnownAttestations => "latest_known_attestations", Table::LatestNewAttestations => "latest_new_attestations", diff --git a/crates/storage/src/store.rs b/crates/storage/src/store.rs index 6777018..cc03e5d 100644 --- a/crates/storage/src/store.rs +++ b/crates/storage/src/store.rs @@ -4,7 +4,10 @@ use crate::api::{StorageBackend, Table}; use ethlambda_types::{ attestation::AttestationData, - block::{AggregatedSignatureProof, Block, BlockBody}, + block::{ + AggregatedSignatureProof, Block, BlockBody, BlockSignaturesWithAttestation, + BlockWithAttestation, SignedBlockWithAttestation, + }, primitives::{Decode, Encode, H256, TreeHash}, signature::ValidatorSignature, state::{ChainConfig, Checkpoint, State}, @@ -128,44 +131,39 @@ impl Store { let mut batch = backend.begin_write().expect("write batch"); // Metadata + let metadata_entries = vec![ + (KEY_TIME.to_vec(), 0u64.as_ssz_bytes()), + (KEY_CONFIG.to_vec(), anchor_state.config.as_ssz_bytes()), + (KEY_HEAD.to_vec(), anchor_block_root.as_ssz_bytes()), + (KEY_SAFE_TARGET.to_vec(), anchor_block_root.as_ssz_bytes()), + ( + KEY_LATEST_JUSTIFIED.to_vec(), + anchor_checkpoint.as_ssz_bytes(), + ), + ( + KEY_LATEST_FINALIZED.to_vec(), + 
anchor_checkpoint.as_ssz_bytes(), + ), + ]; batch - .put_batch( - Table::Metadata, - vec![ - (KEY_TIME.to_vec(), 0u64.as_ssz_bytes()), - (KEY_CONFIG.to_vec(), anchor_state.config.as_ssz_bytes()), - (KEY_HEAD.to_vec(), anchor_block_root.as_ssz_bytes()), - (KEY_SAFE_TARGET.to_vec(), anchor_block_root.as_ssz_bytes()), - ( - KEY_LATEST_JUSTIFIED.to_vec(), - anchor_checkpoint.as_ssz_bytes(), - ), - ( - KEY_LATEST_FINALIZED.to_vec(), - anchor_checkpoint.as_ssz_bytes(), - ), - ], - ) + .put_batch(Table::Metadata, metadata_entries) .expect("put metadata"); // Block and state + let block_entries = vec![( + anchor_block_root.as_ssz_bytes(), + anchor_block.as_ssz_bytes(), + )]; batch - .put_batch( - Table::Blocks, - vec![( - anchor_block_root.as_ssz_bytes(), - anchor_block.as_ssz_bytes(), - )], - ) + .put_batch(Table::Blocks, block_entries) .expect("put block"); + + let state_entries = vec![( + anchor_block_root.as_ssz_bytes(), + anchor_state.as_ssz_bytes(), + )]; batch - .put_batch( - Table::States, - vec![( - anchor_block_root.as_ssz_bytes(), - anchor_state.as_ssz_bytes(), - )], - ) + .put_batch(Table::States, state_entries) .expect("put state"); batch.commit().expect("commit"); @@ -296,15 +294,69 @@ impl Store { pub fn insert_block(&mut self, root: H256, block: Block) { let mut batch = self.backend.begin_write().expect("write batch"); + let entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())]; + batch.put_batch(Table::Blocks, entries).expect("put block"); + batch.commit().expect("commit"); + } + + // ============ Signed Blocks ============ + + /// Insert a signed block, storing the block and signatures separately. + /// + /// Blocks and signatures are stored in separate tables because the genesis + /// block has no signatures. This allows uniform storage of all blocks while + /// only storing signatures for non-genesis blocks. + /// + /// Takes ownership to avoid cloning large signature data. 
+    pub fn insert_signed_block(&mut self, root: H256, signed_block: SignedBlockWithAttestation) {
+        // Destructure to extract all components without cloning
+        let SignedBlockWithAttestation {
+            message:
+                BlockWithAttestation {
+                    block,
+                    proposer_attestation,
+                },
+            signature,
+        } = signed_block;
+
+        let signatures = BlockSignaturesWithAttestation {
+            proposer_attestation,
+            signatures: signature,
+        };
+
+        let mut batch = self.backend.begin_write().expect("write batch");
+
+        let block_entries = vec![(root.as_ssz_bytes(), block.as_ssz_bytes())];
         batch
-            .put_batch(
-                Table::Blocks,
-                vec![(root.as_ssz_bytes(), block.as_ssz_bytes())],
-            )
+            .put_batch(Table::Blocks, block_entries)
             .expect("put block");
+
+        let sig_entries = vec![(root.as_ssz_bytes(), signatures.as_ssz_bytes())];
+        batch
+            .put_batch(Table::BlockSignatures, sig_entries)
+            .expect("put block signatures");
+
         batch.commit().expect("commit");
     }
 
+    /// Get a signed block by combining block and signatures.
+    ///
+    /// Returns None if either the block or signatures are not found.
+    /// Note: Genesis block has no entry in BlockSignatures table.
+    pub fn get_signed_block(&self, root: &H256) -> Option<SignedBlockWithAttestation> {
+        let view = self.backend.begin_read().expect("read view");
+        let key = root.as_ssz_bytes();
+
+        let block_bytes = view.get(Table::Blocks, &key).expect("get")?;
+        let sig_bytes = view.get(Table::BlockSignatures, &key).expect("get")?;
+
+        let block = Block::from_ssz_bytes(&block_bytes).expect("valid block");
+        let signatures =
+            BlockSignaturesWithAttestation::from_ssz_bytes(&sig_bytes).expect("valid signatures");
+
+        Some(signatures.to_signed_block(block))
+    }
+
     // ============ States ============
 
     /// Iterate over all (root, state) pairs.
@@ -332,12 +384,8 @@ impl Store { pub fn insert_state(&mut self, root: H256, state: State) { let mut batch = self.backend.begin_write().expect("write batch"); - batch - .put_batch( - Table::States, - vec![(root.as_ssz_bytes(), state.as_ssz_bytes())], - ) - .expect("put state"); + let entries = vec![(root.as_ssz_bytes(), state.as_ssz_bytes())]; + batch.put_batch(Table::States, entries).expect("put state"); batch.commit().expect("commit"); } @@ -368,11 +416,9 @@ impl Store { pub fn insert_known_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); + let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; batch - .put_batch( - Table::LatestKnownAttestations, - vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())], - ) + .put_batch(Table::LatestKnownAttestations, entries) .expect("put attestation"); batch.commit().expect("commit"); } @@ -404,11 +450,9 @@ impl Store { pub fn insert_new_attestation(&mut self, validator_id: u64, data: AttestationData) { let mut batch = self.backend.begin_write().expect("write batch"); + let entries = vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())]; batch - .put_batch( - Table::LatestNewAttestations, - vec![(validator_id.as_ssz_bytes(), data.as_ssz_bytes())], - ) + .put_batch(Table::LatestNewAttestations, entries) .expect("put attestation"); batch.commit().expect("commit"); } @@ -492,11 +536,9 @@ impl Store { pub fn insert_gossip_signature(&mut self, key: SignatureKey, signature: ValidatorSignature) { let mut batch = self.backend.begin_write().expect("write batch"); + let entries = vec![(encode_signature_key(&key), signature.to_bytes())]; batch - .put_batch( - Table::GossipSignatures, - vec![(encode_signature_key(&key), signature.to_bytes())], - ) + .put_batch(Table::GossipSignatures, entries) .expect("put signature"); batch.commit().expect("commit"); } @@ -540,11 +582,9 @@ impl Store { proofs.push(proof); let mut batch = 
self.backend.begin_write().expect("write batch"); + let entries = vec![(encode_signature_key(&key), proofs.as_ssz_bytes())]; batch - .put_batch( - Table::AggregatedPayloads, - vec![(encode_signature_key(&key), proofs.as_ssz_bytes())], - ) + .put_batch(Table::AggregatedPayloads, entries) .expect("put proofs"); batch.commit().expect("commit"); }