diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh index dab77772f8e..709ecc51e01 100755 --- a/.github/assets/hive/build_simulators.sh +++ b/.github/assets/hive/build_simulators.sh @@ -11,7 +11,7 @@ go build . # Run each hive command in the background for each simulator and wait echo "Building images" -./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.1.0 -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/eest" --sim.buildarg fixtures=https://github.com/ethereum/execution-spec-tests/releases/download/v5.3.0/fixtures_develop.tar.gz --sim.buildarg branch=v5.3.0 -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & ./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & ./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index 6a580d9a110..ae3817cfc3d 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -41,18 +41,14 @@ engine-cancun: sync: [] -# https://github.com/ethereum/hive/issues/1277 -engine-auth: - - "JWT Authentication: No time drift, correct secret (Paris) (reth)" - - "JWT Authentication: Negative time drift, within limit, correct secret (Paris) (reth)" - - "JWT Authentication: Positive time drift, within limit, correct secret (Paris) (reth)" +engine-auth: [] # 7702 test - no fix: it’s too expensive to check whether the storage is empty on each creation # 6110 related tests - may start passing when fixtures improve # 7002 related tests - post-fork test, should fix for spec compliance but not # realistic on mainnet # 7251 related tests - modified contract, not necessarily practical on mainnet, -# 7594: https://github.com/paradigmxyz/reth/issues/18471 +# 7594: https://github.com/paradigmxyz/reth/issues/18975 # worth re-visiting when more of these related tests are passing eest/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 3039541c8f1..deec064b639 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -13,7 +13,7 @@ on: jobs: build: runs-on: ubuntu-latest - timeout-minutes: 60 + timeout-minutes: 90 steps: - name: Checkout uses: actions/checkout@v5 diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 175a9e0d541..8f9d8461f59 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -33,14 +33,39 @@ jobs: repository: ethereum/hive path: hivetests + - name: Get hive commit hash + id: hive-commit + run: echo "hash=$(cd hivetests && git rev-parse HEAD)" >> $GITHUB_OUTPUT + - uses: actions/setup-go@v6 with: go-version: "^1.13.1" - run: go version + - name: Restore hive assets cache + id: cache-hive + uses: actions/cache@v4 + with: + path: ./hive_assets + key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/assets/hive/build_simulators.sh') }} + - name: Build hive assets + if: steps.cache-hive.outputs.cache-hit != 'true' run: .github/assets/hive/build_simulators.sh + - name: Load cached Docker images + if: steps.cache-hive.outputs.cache-hit == 'true' + run: | + cd hive_assets + for tar_file in *.tar; 
do + if [ -f "$tar_file" ]; then + echo "Loading $tar_file..." + docker load -i "$tar_file" + fi + done + # Make hive binary executable + chmod +x hive + - name: Upload hive assets uses: actions/upload-artifact@v4 with: @@ -127,6 +152,8 @@ jobs: limit: .*tests/homestead.* - sim: ethereum/eest/consume-engine limit: .*tests/frontier.* + - sim: ethereum/eest/consume-engine + limit: .*tests/paris.* # consume-rlp - sim: ethereum/eest/consume-rlp @@ -145,6 +172,8 @@ jobs: limit: .*tests/homestead.* - sim: ethereum/eest/consume-rlp limit: .*tests/frontier.* + - sim: ethereum/eest/consume-rlp + limit: .*tests/paris.* needs: - prepare-reth - prepare-hive diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 029b145f07b..ba201679b5a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,7 +18,6 @@ env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth - REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth @@ -74,10 +73,6 @@ jobs: os: ubuntu-24.04 profile: maxperf allow_fail: false - - target: x86_64-unknown-linux-gnu - os: ubuntu-24.04 - profile: reproducible - allow_fail: false - target: aarch64-unknown-linux-gnu os: ubuntu-24.04 profile: maxperf @@ -124,13 +119,7 @@ jobs: echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - name: Build Reth - if: ${{ !(matrix.build.binary == 'op-reth' && matrix.configs.profile == 'reproducible') }} - run: | - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - make build-reth-reproducible - else - make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - fi + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - name: Build Reth deb package if: ${{ matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} @@ -141,13 +130,6 @@ jobs: mkdir artifacts [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" - # Handle reproducible builds which always target x86_64-unknown-linux-gnu - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ matrix.configs.profile }}" == "reproducible" ]]; then - mv "target/x86_64-unknown-linux-gnu/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - else - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - fi - # Move deb packages if they exist if [[ "${{ matrix.build.binary }}" == "reth" && "${{ env.DEB_SUPPORTED_TARGETS }}" == *"${{ matrix.configs.target }}"* ]]; then mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ./artifacts diff --git a/Cargo.lock b/Cargo.lock index 6516e410911..9c4746f0b38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 4 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = 
"1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" dependencies = [ "gimli", ] @@ -97,9 +97,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.9" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8ff73a143281cb77c32006b04af9c047a6b8fe5860e85a88ad325328965355" +checksum = "fc7aacbb0ac0f76aaa64d1e1412f778c0574f241e4073b2a3e09c605884c9b90" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59094911f05dbff1cf5b29046a00ef26452eccc8d47136d50a47c0cf22f00c85" +checksum = "6a0dd3ed764953a6b20458b2b7abbfdc93d20d14b38babe1a70fe631a443a9f1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -139,9 +139,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903cb8f728107ca27c816546f15be38c688df3c381d7bd1a4a9f215effc1ddb4" +checksum = "9556182afa73cddffa91e64a5aa9508d5e8c912b3a15f26998d2388a824d2c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -237,9 +237,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7f1c9a1ccc7f3e03c36976455751a6166a4f0d2d2c530c3f87dfe7d0cdc836" +checksum = "305fa99b538ca7006b0c03cfed24ec6d82beda67aac857ef4714be24231d15e6" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -256,15 +256,15 @@ dependencies = [ "ethereum_ssz_derive", "serde", "serde_with", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", ] [[package]] name = "alloy-evm" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a5f67ee74999aa4fe576a83be1996bdf74a30fce3d248bf2007d6fc7dae8aa" +checksum = "24a48fa6a4a5a69ae8e46c0ae60851602c5016baa3379d076c76e4c2f3b889f7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -299,9 +299,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889eb3949b58368a09d4f16931c660275ef5fb08e5fbd4a96573b19c7085c41f" +checksum = "4b16ee6b2c7d39da592d30a5f9607a83f50ee5ec2a2c301746cc81e91891f4ca" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -366,9 +366,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46e9374c667c95c41177602ebe6f6a2edd455193844f011d973d374b65501b38" +checksum = "223612259a080160ce839a4e5df0125ca403a1d5e7206cc911cea54af5d769aa" dependencies = [ "alloy-consensus", "alloy-eips", @@ -379,9 +379,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.21.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17aaeb600740c181bf29c9f138f9b228d115ea74fa6d0f0343e1952f1a766968" +checksum = "d1e0abe910a26d1b3686f4f6ad58287ce8c7fb85b08603d8c832869f02eb3d79" dependencies = [ "alloy-consensus", "alloy-eips", @@ -392,13 +392,14 @@ dependencies = [ "op-alloy-consensus", "op-revm", "revm", + "thiserror 2.0.16", ] [[package]] name = "alloy-op-hardforks" -version = "0.3.5" +version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "599c1d7dfbccb66603cb93fde00980d12848d32fe5e814f50562104a92df6487" +checksum = "af8bb236fc008fd3b83b2792e30ae79617a99ffc4c3f584f0c9b4ce0a2da52de" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -422,7 +423,7 @@ dependencies = [ "foldhash 0.2.0", "getrandom 0.3.3", "hashbrown 0.16.0", - "indexmap 2.11.1", + "indexmap 2.11.4", "itoa", "k256", "keccak-asm", @@ -654,9 +655,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db46b0901ee16bbb68d986003c66dcb74a12f9d9b3c44f8e85d51974f2458f0f" +checksum = "6d7d47bca1a2a1541e4404aa38b7e262bb4dffd9ac23b4f178729a4ddc5a5caa" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -691,9 +692,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f10620724bd45f80c79668a8cdbacb6974f860686998abce28f6196ae79444" +checksum = "c331c8e48665607682e8a9549a2347c13674d4fbcbdc342e7032834eba2424f4" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -717,9 +718,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5413814be7a22fbc81e0f04a2401fcc3eb25e56fd53b04683e8acecc6e1fe01b" +checksum = "6a8468f1a7f9ee3bae73c24eead0239abea720dbf7779384b9c7e20d51bfb6b0" dependencies = [ "alloy-primitives", "arbitrary", @@ -784,7 +785,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.11.1", + "indexmap 2.11.4", "proc-macro-error2", "proc-macro2", "quote", @@ -936,9 +937,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64c09ec565a90ed8390d82aa08cd3b22e492321b96cb4a3d4f58414683c9e2f" +checksum = "7bf39928a5e70c9755d6811a2928131b53ba785ad37c8bf85c90175b5d43b818" dependencies = [ "alloy-primitives", "darling 0.21.3", @@ -1014,9 +1015,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.99" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "aquamarine" @@ -1371,9 +1372,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.30" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977eb15ea9efd848bb8a4a1a2500347ed7f0bf794edf0dc3ddcf439f43d36b23" +checksum = "5a89bce6054c720275ac2432fbba080a66a2106a44a1b804553930ca6909f4e0" dependencies = [ "compression-codecs", "compression-core", @@ -1491,9 +1492,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.75" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" dependencies = [ "addr2line", "cfg-if", @@ -1501,7 +1502,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", + "windows-link 0.2.0", ] [[package]] @@ -1677,15 +1678,6 @@ dependencies = [ "wyz", ] -[[package]] -name = "block-buffer" 
-version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -1706,9 +1698,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" +checksum = "dcdb4c7013139a150f9fc55d123186dbfaba0d912817466282c73ac49e71fb45" dependencies = [ "cc", "glob", @@ -1726,7 +1718,7 @@ dependencies = [ "boa_interner", "boa_macros", "boa_string", - "indexmap 2.11.1", + "indexmap 2.11.4", "num-bigint", "rustc-hash 2.1.1", ] @@ -1752,7 +1744,7 @@ dependencies = [ "fast-float2", "hashbrown 0.15.5", "icu_normalizer 1.5.0", - "indexmap 2.11.1", + "indexmap 2.11.4", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1798,9 +1790,9 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.15.5", - "indexmap 2.11.1", + "indexmap 2.11.4", "once_cell", - "phf", + "phf 0.11.3", "rustc-hash 2.1.1", "static_assertions", ] @@ -1891,7 +1883,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ - "sha2 0.10.9", + "sha2", "tinyvec", ] @@ -1961,9 +1953,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "2.1.5" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00bf4b112b07b505472dbefd19e37e53307e2bfed5a79e0cc161d58ccd0e687" +checksum = "137a2a2878ed823ef1bd73e5441e245602aae5360022113b8ad259ca4b5b8727" dependencies = [ "arbitrary", "blst", @@ -1977,11 +1969,11 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.12" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5" +checksum = "e1de8bc0aa9e9385ceb3bf0c152e3a9b9544f6c4a912c8ae504e80c1f0368603" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -2001,7 +1993,7 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", "cargo-platform", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_json", ] @@ -2014,7 +2006,7 @@ checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_json", "thiserror 2.0.16", @@ -2143,9 +2135,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.47" +version = "4.5.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931" +checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae" dependencies = [ "clap_builder", "clap_derive", @@ -2153,9 +2145,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.47" +version = "4.5.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6" +checksum = "c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9" dependencies = [ "anstream", "anstyle", @@ -2257,7 +2249,7 @@ dependencies = [ "hmac", "k256", "serde", - "sha2 0.10.9", + "sha2", "thiserror 1.0.69", ] @@ -2273,7 +2265,7 @@ dependencies = [ "once_cell", "pbkdf2", "rand 0.8.5", - 
"sha2 0.10.9", + "sha2", "thiserror 1.0.69", ] @@ -2291,7 +2283,7 @@ dependencies = [ "generic-array", "ripemd", "serde", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 1.0.69", ] @@ -2309,7 +2301,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -2349,9 +2341,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.30" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "485abf41ac0c8047c07c87c72c8fb3eb5197f6e9d7ded615dfd1a00ae00a0f64" +checksum = "ef8a506ec4b81c460798f572caead636d57d3d7e940f998160f52bd254bf2d23" dependencies = [ "brotli", "compression-core", @@ -2399,15 +2391,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dccd746bf9b1038c0507b7cec21eb2b11222db96a2902c96e8c185d6d20fb9c4" +checksum = "b6407bff74dea37e0fa3dc1c1c974e5d46405f0c987bf9997a0762adce71eda6" dependencies = [ "cfg-if", "cpufeatures", - "hex", "proptest", - "serde", + "serde_core", ] [[package]] @@ -2841,12 +2832,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d630bccd429a5bb5a64b5e94f693bfc48c9f8566418fda4c494cc94f911f87cc" +checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" dependencies = [ "powerfmt", - "serde", + "serde_core", ] [[package]] @@ -2956,7 +2947,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.4", + "block-buffer", "const-oid", "crypto-common", "subtle", @@ -2990,7 +2981,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.61.0", + "windows-sys 0.59.0", ] [[package]] @@ -3110,7 +3101,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.9", + "sha2", "subtle", "zeroize", ] @@ -3206,7 +3197,7 @@ checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "encoder-standard" version = "0.1.0" -source = "git+https://github.com/scroll-tech/da-codec#b4cce5c5d17845fc6f4f6ec422d559470a09dca9" +source = "git+https://github.com/scroll-tech/da-codec#ef47e96e3a94f7eaa0772a8de2496b8eb7e0e5b9" dependencies = [ "zstd", ] @@ -3287,7 +3278,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3307,7 +3298,7 @@ checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" dependencies = [ "cpufeatures", "ring", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -4120,7 +4111,7 @@ dependencies = [ "js-sys", "libc", "r-efi", - "wasi 0.14.5+wasi-0.2.4", + "wasi 0.14.7+wasi-0.2.4", "wasm-bindgen", ] @@ -4136,9 +4127,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" [[package]] name = "git2" @@ -4238,7 +4229,7 @@ 
dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.11.1", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -4339,9 +4330,6 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -dependencies = [ - "serde", -] [[package]] name = "hex-conservative" @@ -4572,9 +4560,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ "base64 0.22.1", "bytes", @@ -4608,7 +4596,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.57.0", + "windows-core 0.58.0", ] [[package]] @@ -4919,14 +4907,15 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.1" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206a8042aec68fa4a62e8d3f7aa4ceb508177d9324faf261e1959e495b7a1921" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -5064,7 +5053,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5140,9 +5129,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.78" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0b063578492ceec17683ef2f8c5e89121fbd0b172cbc280635ab7567db2738" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -5346,7 +5335,7 @@ dependencies = [ "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.9", + "sha2", "signature", ] @@ -5397,9 +5386,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.175" +version = "0.2.176" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" [[package]] name = "libgit2-sys" @@ -5415,12 +5404,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-link 0.2.0", ] [[package]] @@ -5442,7 +5431,7 @@ dependencies = [ "k256", "multihash", "quick-protobuf", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tracing", "zeroize", @@ -5461,61 +5450,15 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ "bitflags 2.9.4", "libc", "redox_syscall", ] -[[package]] -name = "libsecp256k1" -version = "0.7.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79019718125edc905a079a70cfa5f3820bc76139fc91d6f9abc27ea2a887139" -dependencies = [ - "arrayref", - "base64 0.22.1", - "digest 0.9.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.5", - "serde", - "sha2 0.9.9", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" -dependencies = [ - "libsecp256k1-core", -] - [[package]] name = "libz-sys" version = "1.1.22" @@ -5684,9 +5627,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.5" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memmap2" @@ -5735,7 +5678,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" dependencies = [ "base64 0.22.1", - "indexmap 2.11.1", + "indexmap 2.11.4", "metrics", "metrics-util", "quanta", @@ -5767,7 +5710,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.5", - "indexmap 2.11.1", + "indexmap 2.11.4", "metrics", "ordered-float", "quanta", @@ -5878,20 +5821,19 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.10" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", - "loom", + "equivalent", "parking_lot", "portable-atomic", "rustc_version 0.4.1", "smallvec", "tagptr", - "thiserror 1.0.69", "uuid", ] @@ -6134,9 +6076,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0418987d1aaed324d95b4beffc93635e19be965ed5d63ec07a35980fe3b71a4" +checksum = "bfa11e84403164a9f12982ab728f3c67c6fd4ab5b5f0254ffc217bdbd3b28ab0" dependencies = [ "alloy-rlp", "arbitrary", @@ -6149,9 +6091,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] @@ -6292,8 +6234,8 @@ dependencies = [ [[package]] name = "op-revm" -version = "10.1.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "11.1.1" +source = 
"git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "revm", @@ -6308,9 +6250,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.73" +version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ "bitflags 2.9.4", "cfg-if", @@ -6340,9 +6282,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.109" +version = "0.9.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", @@ -6352,9 +6294,9 @@ dependencies = [ [[package]] name = "opentelemetry" -version = "0.29.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" dependencies = [ "futures-core", "futures-sink", @@ -6366,25 +6308,23 @@ dependencies = [ [[package]] name = "opentelemetry-http" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" +checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d" dependencies = [ "async-trait", "bytes", "http", "opentelemetry", "reqwest", - "tracing", ] [[package]] name = "opentelemetry-otlp" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ - "futures-core", "http", "opentelemetry", "opentelemetry-http", @@ -6398,38 +6338,36 @@ dependencies = [ [[package]] name = "opentelemetry-proto" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", "tonic", + "tonic-prost", ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b29a9f89f1a954936d5aa92f19b2feec3c8f3971d3e96206640db7f9706ae3" +checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846" [[package]] name = "opentelemetry_sdk" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd" dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "opentelemetry", "percent-encoding", "rand 0.9.2", - "serde_json", "thiserror 2.0.16", - "tracing", ] [[package]] @@ -6456,7 +6394,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "primeorder", - "sha2 
0.10.9", + "sha2", ] [[package]] @@ -6587,8 +6525,18 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_macros", - "phf_shared", + "phf_macros 0.11.3", + "phf_shared 0.11.3", +] + +[[package]] +name = "phf" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_macros 0.13.1", + "phf_shared 0.13.1", "serde", ] @@ -6598,18 +6546,41 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared", + "phf_shared 0.11.3", "rand 0.8.5", ] +[[package]] +name = "phf_generator" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" +dependencies = [ + "fastrand 2.3.0", + "phf_shared 0.13.1", +] + [[package]] name = "phf_macros" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ - "phf_generator", - "phf_shared", + "phf_generator 0.11.3", + "phf_shared 0.11.3", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "phf_macros" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" +dependencies = [ + "phf_generator 0.13.1", + "phf_shared 0.13.1", "proc-macro2", "quote", "syn 2.0.106", @@ -6624,6 +6595,15 @@ dependencies = [ "siphasher", ] +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.10" @@ -6799,11 +6779,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit", + "toml_edit 0.23.6", ] [[package]] @@ -6864,9 +6844,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" +checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" dependencies = [ "bit-set", "bit-vec", @@ -6916,9 +6896,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -6926,9 +6906,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = 
"9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -7030,7 +7010,7 @@ dependencies = [ "once_cell", "socket2 0.6.0", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7284,9 +7264,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.2" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" dependencies = [ "aho-corasick", "memchr", @@ -7296,9 +7276,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" dependencies = [ "aho-corasick", "memchr", @@ -7372,9 +7352,9 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" +checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "reth" @@ -8068,7 +8048,7 @@ dependencies = [ "rand 0.8.5", "reth-network-peers", "secp256k1 0.30.0", - "sha2 0.10.9", + "sha2", "sha3", "thiserror 2.0.16", "tokio", @@ -8287,7 +8267,7 @@ dependencies = [ "futures-util", "reqwest", "reth-fs-util", - "sha2 0.10.9", + "sha2", "tempfile", "test-case", "tokio", @@ -8484,7 +8464,7 @@ dependencies = [ "reth-primitives-traits", "serde", "serde_json", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", ] @@ -8753,6 +8733,7 @@ name = "reth-invalid-block-hooks" version = "1.8.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -8760,18 +8741,24 @@ dependencies = [ "futures", "jsonrpsee", "pretty_assertions", + "reth-chainspec", "reth-engine-primitives", + "reth-ethereum-primitives", "reth-evm", + "reth-evm-ethereum", "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", + "reth-testing-utils", "reth-tracing", "reth-trie", + "revm", "revm-bytecode", "revm-database", "serde", "serde_json", + "tempfile", ] [[package]] @@ -9605,7 +9592,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tracing", ] @@ -9903,7 +9890,6 @@ dependencies = [ "reth-errors", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", - "reth-evm", "reth-execution-types", "reth-fs-util", "reth-metrics", @@ -10118,7 +10104,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "sha2 0.10.9", + "sha2", "thiserror 2.0.16", "tokio", "tokio-stream", @@ -10532,7 +10518,7 @@ dependencies = [ "scroll-alloy-hardforks", "scroll-alloy-rpc-types-engine", "serde", - "sha2 0.10.9", + "sha2", ] [[package]] @@ -10625,6 +10611,7 @@ dependencies = [ "scroll-alloy-evm", "scroll-alloy-hardforks", "scroll-alloy-network", + "scroll-alloy-rpc-types", "scroll-alloy-rpc-types-engine", "serde_json", "tokio", @@ -11023,18 +11010,21 @@ version = "1.8.2" dependencies = [ "clap", "eyre", + "reth-tracing-otlp", "rolling-file", "tracing", "tracing-appender", "tracing-journald", "tracing-logfmt", "tracing-subscriber 0.3.20", + "url", ] [[package]] name = "reth-tracing-otlp" version = "1.8.2" dependencies = [ + "eyre", 
"opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -11042,6 +11032,7 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber 0.3.20", + "url", ] [[package]] @@ -11189,6 +11180,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "codspeed-criterion-compat", + "crossbeam-channel", "dashmap 6.1.0", "derive_more", "itertools 0.14.0", @@ -11283,8 +11275,8 @@ dependencies = [ [[package]] name = "revm" -version = "29.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "30.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "revm-bytecode", "revm-context", @@ -11301,19 +11293,19 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "6.2.2" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitvec", - "phf", + "phf 0.13.1", "revm-primitives", "serde", ] [[package]] name = "revm-context" -version = "9.1.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "10.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitvec", "cfg-if", @@ -11328,8 +11320,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "10.2.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11343,8 +11335,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "9.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11356,8 +11348,8 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "8.0.2" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "either", @@ -11368,8 +11360,8 @@ dependencies = [ [[package]] name = "revm-handler" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "derive-where", @@ -11386,8 +11378,8 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "either", @@ -11403,9 +11395,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.29.2" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdb678b03faa678a7007a7c761a78efa9ca9adcd9434ef3d1ad894aec6e43d1" +checksum = 
"0ce1228a7989cc3d9af84c0de2abe39680a252c265877e67d2f0fb4f392cb690" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -11423,19 +11415,20 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "25.0.3" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "27.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "revm-bytecode", "revm-context-interface", "revm-primitives", + "revm-state", "serde", ] [[package]] name = "revm-precompile" -version = "27.0.0" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "28.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11448,19 +11441,18 @@ dependencies = [ "c-kzg", "cfg-if", "k256", - "libsecp256k1", "p256", "revm-primitives", "ripemd", "rug", "secp256k1 0.31.1", - "sha2 0.10.9", + "sha2", ] [[package]] name = "revm-primitives" -version = "20.2.1" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "21.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-primitives", "num_enum", @@ -11471,7 +11463,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm#919aa258ae00c5533380c9d866e8acc2cb95f8bc" +source = "git+https://github.com/scroll-tech/scroll-revm?tag=scroll-v91#a1ac004adf0019d9926defc4e31e6a76a7e558f7" dependencies = [ "auto_impl", "enumn", @@ -11484,8 +11476,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm#51f65cca104d85ea41125e88d58ece665d1f43c1" +version = "8.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitflags 2.9.4", "revm-bytecode", @@ -11717,7 +11709,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.26", + "semver 1.0.27", ] [[package]] @@ -11730,7 +11722,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11743,14 +11735,14 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.31" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "log", "once_cell", @@ -11770,7 +11762,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.4.0", + "security-framework 3.5.0", ] [[package]] @@ -11798,10 +11790,10 @@ dependencies = [ "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework 3.4.0", + "security-framework 3.5.0", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11812,9 +11804,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = 
"rustls-webpki" -version = "0.103.5" +version = "0.103.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a37813727b78798e53c2bec3f5e8fe12a6d6f8389bf9ca7802add4c9905ad8" +checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ "ring", "rustls-pki-types", @@ -11866,7 +11858,7 @@ version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.61.0", + "windows-sys 0.61.1", ] [[package]] @@ -12136,9 +12128,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b369d18893388b345804dc0007963c99b7d665ae71d275812d828c6f089640" +checksum = "cc198e42d9b7510827939c9a15f5062a0c913f3371d765977e586d2fe6c16f4a" dependencies = [ "bitflags 2.9.4", "core-foundation 0.10.1", @@ -12168,11 +12160,12 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] @@ -12237,14 +12230,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.144" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56177480b00303e689183f110b4e727bb4211d692c62d4fcd16d02be93077d40" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.11.1", + "indexmap 2.11.4", "itoa", "memchr", "ryu", + "serde", "serde_core", ] @@ -12282,15 +12276,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.11.1", + "indexmap 2.11.4", "schemars 0.9.0", "schemars 1.0.4", "serde", @@ -12302,11 +12296,11 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e" dependencies = [ - "darling 0.20.11", + "darling 0.21.3", "proc-macro2", "quote", "syn 2.0.106", @@ -12333,19 +12327,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha2" version = "0.10.9" @@ -12785,15 +12766,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.22.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84fa4d11fadde498443cca10fd3ac23c951f0dc59e080e9f4b93d4df4e4eea53" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand 2.3.0", 
"getrandom 0.3.3", "once_cell", "rustix 1.1.2", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12978,11 +12959,12 @@ dependencies = [ [[package]] name = "time" -version = "0.3.43" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bde6f1ec10e72d583d91623c939f623002284ef622b87de38cfd546cbf2031" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", + "itoa", "js-sys", "libc", "num-conv", @@ -13106,9 +13088,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ "rustls", "tokio", @@ -13166,8 +13148,8 @@ checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", - "toml_datetime", - "toml_edit", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", ] [[package]] @@ -13179,20 +13161,50 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.1", + "indexmap 2.11.4", "serde", "serde_spanned", - "toml_datetime", + "toml_datetime 0.6.11", "toml_write", "winnow", ] +[[package]] +name = "toml_edit" +version = "0.23.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3effe7c0e86fdff4f69cdd2ccc1b96f933e24811c5441d44904e8683e27184b" +dependencies = [ + "indexmap 2.11.4", + "toml_datetime 0.7.2", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" +dependencies = [ + "winnow", +] + [[package]] name = "toml_write" version = "0.1.2" @@ -13201,9 +13213,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" -version = "0.12.3" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ "async-trait", "base64 0.22.1", @@ -13213,13 +13225,24 @@ dependencies = [ "http-body-util", "percent-encoding", "pin-project", - "prost", + "sync_wrapper", "tokio-stream", "tower-layer", "tower-service", "tracing", ] +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost", + "tonic", +] + [[package]] name = "tower" version = "0.5.2" @@ -13229,7 +13252,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.11.1", + "indexmap 2.11.4", "pin-project-lite", "slab", "sync_wrapper", @@ -13374,15 +13397,16 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = 
"0.30.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444" +checksum = "1e6e5658463dd88089aba75c7791e1d3120633b1bfde22478b28f625a9bb1b8e" dependencies = [ "js-sys", - "once_cell", "opentelemetry", "opentelemetry_sdk", + "rustversion", "smallvec", + "thiserror 2.0.16", "tracing", "tracing-core", "tracing-log", @@ -13806,27 +13830,27 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.14.5+wasi-0.2.4" +version = "0.14.7+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4494f6290a82f5fe584817a676a34b9d6763e8d9d18204009fb31dceca98fd4" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" dependencies = [ "wasip2", ] [[package]] name = "wasip2" -version = "1.0.0+wasi-0.2.4" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03fa2761397e5bd52002cd7e73110c71af2109aca4e521a9f40473fe685b0a24" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.101" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e14915cadd45b529bb8d1f343c4ed0ac1de926144b746e2710f9cd05df6603b" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", @@ -13837,9 +13861,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.101" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28d1ba982ca7923fd01448d5c30c6864d0a14109560296a162f80f305fb93bb" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" dependencies = [ "bumpalo", "log", @@ -13851,9 +13875,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.51" +version = "0.4.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca85039a9b469b38336411d6d6ced91f3fc87109a2a27b0c197663f5144dffe" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" dependencies = [ "cfg-if", "js-sys", @@ -13864,9 +13888,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.101" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c3d463ae3eff775b0c45df9da45d68837702ac35af998361e2c84e7c5ec1b0d" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -13874,9 +13898,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.101" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb4ce89b08211f923caf51d527662b75bdc9c9c7aab40f86dcb9fb85ac552aa" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", @@ -13887,9 +13911,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.101" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f143854a3b13752c6950862c906306adb27c7e839f7414cec8fea35beab624c1" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] @@ -13923,9 +13947,9 @@ dependencies = [ [[package]] 
name = "web-sys" -version = "0.3.78" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e4b637749ff0d92b8fad63aa1f7cff3cbe125fd49c175cd6345e7272638b12" +checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" dependencies = [ "js-sys", "wasm-bindgen", @@ -14087,8 +14111,8 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ - "windows-implement 0.60.0", - "windows-interface 0.59.1", + "windows-implement 0.60.1", + "windows-interface 0.59.2", "windows-link 0.1.3", "windows-result 0.3.4", "windows-strings 0.4.2", @@ -14129,9 +14153,9 @@ dependencies = [ [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "edb307e42a74fb6de9bf3a02d9712678b22399c87e6fa869d6dfcd8c1b7754e0" dependencies = [ "proc-macro2", "quote", @@ -14162,9 +14186,9 @@ dependencies = [ [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "c0abd1ddbc6964ac14db11c7213d6532ef34bd9aa042c2e5935f59d7908b46a5" dependencies = [ "proc-macro2", "quote", @@ -14292,14 +14316,14 @@ version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.3", + "windows-targets 0.53.4", ] [[package]] name = "windows-sys" -version = "0.61.0" +version = "0.61.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" +checksum = "6f109e41dd4a3c848907eb83d5a42ea98b3769495597450cf6d153507b166f0f" dependencies = [ "windows-link 0.2.0", ] @@ -14352,11 +14376,11 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.3" +version = "0.53.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +checksum = "2d42b7b7f66d2a06854650af09cfdf8713e427a439c97ad65a6375318033ac4b" dependencies = [ - "windows-link 0.1.3", + "windows-link 0.2.0", "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", @@ -14577,9 +14601,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c573471f125075647d03df72e026074b7203790d41351cd6edc96f46bcccd36" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "write16" @@ -14629,9 +14653,9 @@ dependencies = [ [[package]] name = "xattr" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" dependencies = [ "libc", "rustix 1.1.2", diff --git a/Cargo.toml b/Cargo.toml index e0a1c7da626..fb12f7372c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -473,7 +473,8 @@ reth-storage-errors = { path = "crates/storage/errors", default-features = false reth-tasks = { path = 
"crates/tasks" } reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } -reth-tracing = { path = "crates/tracing" } +reth-tracing = { path = "crates/tracing", default-features = false } +reth-tracing-otlp = { path = "crates/tracing-otlp" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie/trie", default-features = false } reth-trie-common = { path = "crates/trie/common", default-features = false } @@ -486,32 +487,32 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-primitives = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", default-features = false } -revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", default-features = false } -revm-inspectors = "0.29.0" +revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", tag = "scroll-v91", default-features = false } +revm-inspectors = "0.31.0" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.21.2", default-features = false } -alloy-primitives = { version = "1.3.1", default-features = false, features = ["map-foldhash"] } 
+alloy-evm = { version = "0.22.0", default-features = false } +alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.3.1" -alloy-sol-types = { version = "1.3.1", default-features = false } +alloy-sol-macro = "1.4.1" +alloy-sol-types = { version = "1.4.1", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } -alloy-hardforks = "0.3.5" +alloy-hardforks = "0.4.0" alloy-consensus = { version = "1.0.37", default-features = false } alloy-contract = { version = "1.0.37", default-features = false } @@ -563,8 +564,8 @@ reth-scroll-trie = { path = "crates/scroll/trie" } reth-scroll-txpool = { path = "crates/scroll/txpool" } # op -alloy-op-evm = { version = "0.21.2", default-features = false } -alloy-op-hardforks = "0.3.5" +alloy-op-evm = { version = "0.22.0", default-features = false } +alloy-op-hardforks = "0.4.0" op-alloy-rpc-types = { version = "0.20.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.20.0", default-features = false } op-alloy-network = { version = "0.20.0", default-features = false } @@ -691,6 +692,13 @@ c-kzg = "2.1.4" # config toml = "0.8" +# otlp obs +opentelemetry_sdk = "0.31" +opentelemetry = "0.31" +opentelemetry-otlp = "0.31" +opentelemetry-semantic-conventions = "0.31" +tracing-opentelemetry = "0.32" + # misc-testing arbitrary = "1.3" assert_matches = "1.5.0" @@ -763,8 +771,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm" } -op-revm = { git = "https://github.com/scroll-tech/revm" } +revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" } +op-revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 335f7de6195..31ed8bda5cd 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -1380,8 +1380,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); - let chain: Vec<_> = state.canonical_chain().collect(); - assert!(chain.is_empty()); + assert!(state.canonical_chain().next().is_none()); } #[test] diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index 7d53c9d2d5a..70fe3a23daa 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,7 +1,6 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::Header; use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; @@ -75,8 +74,8 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { } } -impl EthChainSpec for ChainSpec { - type Header = Header; +impl EthChainSpec for ChainSpec { + type Header = H; fn chain(&self) -> Chain { self.chain diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 09e51196067..c788b3bded8 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -3,7 +3,7 @@ use 
alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, - holesky, hoodi, mainnet, sepolia, EthChainSpec, + holesky, hoodi, sepolia, EthChainSpec, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; @@ -108,10 +108,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { deposit_contract: Some(MAINNET_DEPOSIT_CONTRACT), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT, - blob_params: BlobScheduleBlobParams::default().with_scheduled([ - (mainnet::MAINNET_BPO1_TIMESTAMP, BlobParams::bpo1()), - (mainnet::MAINNET_BPO2_TIMESTAMP, BlobParams::bpo2()), - ]), + blob_params: BlobScheduleBlobParams::default(), }; spec.genesis.config.dao_fork_support = true; spec.into() @@ -266,7 +263,7 @@ impl From for BaseFeeParamsKind { #[derive(Clone, Debug, PartialEq, Eq, From)] pub struct ForkBaseFeeParams(Vec<(Box, BaseFeeParams)>); -impl core::ops::Deref for ChainSpec { +impl core::ops::Deref for ChainSpec { type Target = ChainHardforks; fn deref(&self) -> &Self::Target { @@ -1035,7 +1032,7 @@ impl From<&Arc> for ChainSpecBuilder { } } -impl EthExecutorSpec for ChainSpec { +impl EthExecutorSpec for ChainSpec { fn deposit_contract_address(&self) -> Option
{ self.deposit_contract.map(|deposit_contract| deposit_contract.address) } @@ -1131,10 +1128,7 @@ Merge hard forks: Post-merge hard forks (timestamp based): - Shanghai @1681338455 - Cancun @1710338135 -- Prague @1746612311 -- Osaka @1764798551 -- Bpo1 @1765978199 -- Bpo2 @1767747671" +- Prague @1746612311" ); } @@ -1378,10 +1372,7 @@ Post-merge hard forks (timestamp based): ), ( EthereumHardfork::Prague, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -1525,22 +1516,12 @@ Post-merge hard forks (timestamp based): // First Prague block ( Head { number: 20000002, timestamp: 1746612311, ..Default::default() }, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), - // Osaka block + // Future Prague block ( - Head { - number: 20000002, - timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, - ..Default::default() - }, - ForkId { - hash: ForkHash(hex!("0x5167e2a6")), - next: mainnet::MAINNET_BPO1_TIMESTAMP, - }, + Head { number: 20000002, timestamp: 2000000000, ..Default::default() }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -1849,22 +1830,11 @@ Post-merge hard forks (timestamp based): ), // First Prague block ( Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, - ForkId { - hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), - next: mainnet::MAINNET_OSAKA_TIMESTAMP, - }, - ), - // Osaka block + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + ), // Future Prague block ( - Head { - number: 20000004, - timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, - ..Default::default() - }, - ForkId { - hash: ForkHash(hex!("0x5167e2a6")), - next: mainnet::MAINNET_BPO1_TIMESTAMP, - }, + Head { number: 20000004, timestamp: 2000000000, ..Default::default() }, + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, ), ], ); @@ -2521,8 +2491,10 @@ Post-merge hard forks (timestamp based): #[test] fn latest_eth_mainnet_fork_id() { - // BPO2 - assert_eq!(ForkId { hash: ForkHash(hex!("0xfd414558")), next: 0 }, MAINNET.latest_fork_id()) + assert_eq!( + ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + MAINNET.latest_fork_id() + ) } #[test] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 1ceba8f57da..25f32f63a2b 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -24,7 +24,7 @@ use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, ProviderFactory, StaticFileProviderFactory, }; -use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; +use reth_stages::{sets::DefaultStages, Pipeline}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; @@ -126,7 +126,6 @@ impl EnvironmentArgs { where C: ChainSpecParser, { - let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); let factory = ProviderFactory::>>::new( @@ -137,9 +136,8 @@ impl EnvironmentArgs { .with_prune_modes(prune_modes.clone()); // Check for consistency between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? 
+ if let Some(unwind_target) = + factory.static_file_provider().check_consistency(&factory.provider()?)? { if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); @@ -150,7 +148,7 @@ impl EnvironmentArgs { // instead. assert_ne!( unwind_target, - PipelineTarget::Unwind(0), + 0, "A static file <> database inconsistency was found that would trigger an unwind to block 0" ); @@ -175,7 +173,7 @@ impl EnvironmentArgs { // Move all applicable data from database to static files. pipeline.move_to_static_files()?; - pipeline.unwind(unwind_target.unwind_target().expect("should exist"), None)?; + pipeline.unwind(unwind_target, None)?; } Ok(factory) diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 6214df0ec98..9d06a35dcaa 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,4 +1,3 @@ -use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ @@ -66,9 +65,10 @@ impl Command { } Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { - StaticFileSegment::Headers => { - (table_key::(&key)?, >::MASK) - } + StaticFileSegment::Headers => ( + table_key::(&key)?, + >>::MASK, + ), StaticFileSegment::Transactions => { (table_key::(&key)?, >>::MASK) } diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 9288a56a86c..2540e77c111 100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -97,7 +97,7 @@ impl TableViewer<()> for ListTableViewer<'_, N> { fn view(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; - let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?; + let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; let total_entries = stats.entries(); let final_entry_idx = total_entries.saturating_sub(1); if self.args.skip > final_entry_idx { diff --git a/crates/cli/commands/src/db/repair_trie.rs b/crates/cli/commands/src/db/repair_trie.rs index e7ee8d7977c..e5b7db0e2f0 100644 --- a/crates/cli/commands/src/db/repair_trie.rs +++ b/crates/cli/commands/src/db/repair_trie.rs @@ -179,8 +179,17 @@ fn verify_and_repair( Output::StorageWrong { account, path, expected: node, .. } | Output::StorageMissing(account, path, node) => { // Wrong/missing storage node value, upsert it + // (We can't just use the `upsert` method with a dup cursor; it's not properly + // supported.) let nibbles = StoredNibblesSubKey(path); - let entry = StorageTrieEntry { nibbles, node }; + let entry = StorageTrieEntry { nibbles: nibbles.clone(), node }; + if storage_trie_cursor + .seek_by_key_subkey(account, nibbles.clone())? 
+ .filter(|v| v.nibbles == nibbles) + .is_some() + { + storage_trie_cursor.delete_current()?; + } storage_trie_cursor.upsert(account, &entry)?; } Output::Progress(path) => { diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index c3e997e8343..c66ac1443a2 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -5,18 +5,17 @@ use clap::{value_parser, Args, Parser}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_cli_util::parse_socket_address; use reth_db::init_db; use reth_node_builder::NodeBuilder; use reth_node_core::{ args::{ - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, NetworkArgs, - PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs, + NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, version, }; -use std::{ffi::OsString, fmt, net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{ffi::OsString, fmt, path::PathBuf, sync::Arc}; /// Start the node #[derive(Debug, Parser)] @@ -39,11 +38,9 @@ pub struct NodeCommand, - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - #[arg(long, value_name = "SOCKET", value_parser = parse_socket_address, help_heading = "Metrics")] - pub metrics: Option, + /// Prometheus metrics configuration. + #[command(flatten)] + pub metrics: MetricArgs, /// Add a new instance of a node. /// @@ -225,7 +222,7 @@ mod tests { use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS}; use std::{ - net::{IpAddr, Ipv4Addr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, path::Path, }; @@ -318,7 +315,10 @@ mod tests { "10000000", ]) .unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); let cmd: NodeCommand = NodeCommand::try_parse_args_from([ "reth", @@ -328,7 +328,10 @@ mod tests { "10000000", ]) .unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); let cmd: NodeCommand = NodeCommand::try_parse_args_from([ "reth", @@ -338,7 +341,10 @@ mod tests { "10000000", ]) .unwrap(); - assert_eq!(cmd.metrics, Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001))); + assert_eq!( + cmd.metrics.prometheus, + Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9001)) + ); } #[test] diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 861fd836e76..792d4533856 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -38,9 +38,9 @@ impl let header = (move || get_single_header(fetch_client.clone(), id)) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. 
Retrying...")) .await?; - println!("Successfully downloaded header: {header:?}"); + tracing::info!(target: "reth::cli", ?header, "Successfully downloaded header"); } Subcommands::Body { args, id } => { @@ -51,13 +51,13 @@ impl let hash = match id { BlockHashOrNumber::Hash(hash) => hash, BlockHashOrNumber::Number(number) => { - println!("Block number provided. Downloading header first..."); + tracing::info!(target: "reth::cli", "Block number provided. Downloading header first..."); let client = fetch_client.clone(); let header = (move || { get_single_header(client.clone(), BlockHashOrNumber::Number(number)) }) .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting header. Retrying...")) .await?; header.hash() } @@ -67,7 +67,7 @@ impl client.get_block_bodies(vec![hash]) }) .retry(backoff) - .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) + .notify(|err, _| tracing::warn!(target: "reth::cli", error = %err, "Error requesting block. Retrying...")) .await? .split(); if result.len() != 1 { @@ -77,7 +77,7 @@ impl ) } let body = result.into_iter().next().unwrap(); - println!("Successfully downloaded body: {body:?}") + tracing::info!(target: "reth::cli", ?body, "Successfully downloaded body") } Subcommands::Rlpx(command) => { command.execute().await?; diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index ca88c131ff6..f4636f5f83b 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -283,7 +283,7 @@ pub fn type_name() -> String { // With alloy type transition the types are renamed, we map them here to the original name so that test vector files remain consistent let name = std::any::type_name::(); match name { - "alloy_consensus::transaction::typed::EthereumTypedTransaction" => "Transaction".to_string(), + "alloy_consensus::transaction::envelope::EthereumTypedTransaction" => "Transaction".to_string(), "alloy_consensus::transaction::envelope::EthereumTxEnvelope" => "TransactionSigned".to_string(), name => { name.split("::").last().unwrap_or(std::any::type_name::()).to_string() diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 1bbd2604f97..ef34e5b5e84 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -54,7 +54,7 @@ pub fn generate_vectors(mut tables: Vec) -> Result<()> { match table.as_str() { $( stringify!($table_type) => { - println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); + tracing::info!(target: "reth::cli", "Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME); generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup); }, diff --git a/crates/cli/util/src/sigsegv_handler.rs b/crates/cli/util/src/sigsegv_handler.rs index dabbf866cee..eeca446b72a 100644 --- a/crates/cli/util/src/sigsegv_handler.rs +++ b/crates/cli/util/src/sigsegv_handler.rs @@ -126,7 +126,7 @@ pub fn install() { libc::sigaltstack(&raw const alt_stack, ptr::null_mut()); let mut sa: libc::sigaction = mem::zeroed(); - sa.sa_sigaction = print_stack_trace as libc::sighandler_t; + sa.sa_sigaction = print_stack_trace as *const () as libc::sighandler_t; sa.sa_flags = libc::SA_NODEFER | 
libc::SA_RESETHAND | libc::SA_ONSTACK; libc::sigemptyset(&raw mut sa.sa_mask); libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut()); diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 22e349af0ba..669eb19d1a4 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -62,8 +62,9 @@ pub trait Consensus: HeaderValidator { /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". + /// See the Yellow Paper sections 4.4.2 "Holistic Validity" and 4.4.4 "Block Header Validity". + /// Note: Ommer Validation (previously section 11.1) no longer applies since the transition + /// to proof of stake at the Paris hard fork. /// /// **This should not be called for the genesis block**. /// diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index 8d4a469ee16..5b3563c7ac3 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -12,6 +12,7 @@ workspace = true [dependencies] # reth +revm.workspace = true revm-bytecode.workspace = true revm-database.workspace = true reth-engine-primitives.workspace = true @@ -38,3 +39,13 @@ jsonrpsee.workspace = true pretty_assertions.workspace = true serde.workspace = true serde_json.workspace = true + +[dev-dependencies] +alloy-eips.workspace = true +reth-chainspec.workspace = true +reth-ethereum-primitives.workspace = true +reth-evm-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-revm = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true +tempfile.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index f979958a198..d00f3b8287b 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,31 +1,50 @@ use alloy_consensus::BlockHeader; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use pretty_assertions::Comparison; use reth_engine_primitives::InvalidBlockHook; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_provider::{BlockExecutionOutput, StateProviderFactory}; -use reth_revm::{database::StateProviderDatabase, db::BundleState, state::AccountInfo}; +use reth_provider::{BlockExecutionOutput, StateProvider, StateProviderFactory}; +use reth_revm::{ + database::StateProviderDatabase, + db::{BundleState, State}, +}; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; use reth_trie::{updates::TrieUpdates, HashedStorage}; +use revm::state::AccountInfo; use revm_bytecode::Bytecode; -use revm_database::states::{ - reverts::{AccountInfoRevert, RevertToSlot}, - AccountStatus, StorageSlot, +use revm_database::{ + states::{reverts::AccountInfoRevert, StorageSlot}, + AccountStatus, RevertToSlot, }; use serde::Serialize; use std::{collections::BTreeMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; +type CollectionResult = + (BTreeMap, BTreeMap, reth_trie::HashedPostState, BundleState); + +/// Serializable version of `BundleState` for deterministic comparison 
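// `BundleState` keeps its accounts, contracts, and per-account storage in hash maps, whose
// iteration order can vary between runs; mirroring them into `BTreeMap`s here makes the
// serialized JSON stable, so the original and re-executed states can be diffed meaningfully.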
#[derive(Debug, PartialEq, Eq)] -struct AccountRevertSorted { - pub account: AccountInfoRevert, - pub storage: BTreeMap, - pub previous_status: AccountStatus, - pub wipe_storage: bool, +struct BundleStateSorted { + /// Account state + pub state: BTreeMap, + /// All created contracts in this block. + pub contracts: BTreeMap, + /// Changes to revert + /// + /// **Note**: Inside vector is *not* sorted by address. + /// + /// But it is unique by address. + pub reverts: Vec>, + /// The size of the plain state in the bundle state + pub state_size: usize, + /// The size of reverts in the bundle state + pub reverts_size: usize, } +/// Serializable version of `BundleAccount` #[derive(Debug, PartialEq, Eq)] struct BundleAccountSorted { pub info: Option, @@ -40,74 +59,120 @@ struct BundleAccountSorted { pub status: AccountStatus, } +/// Serializable version of `AccountRevert` #[derive(Debug, PartialEq, Eq)] -struct BundleStateSorted { - /// Account state - pub state: BTreeMap, - /// All created contracts in this block. - pub contracts: BTreeMap, - /// Changes to revert - /// - /// **Note**: Inside vector is *not* sorted by address. - /// - /// But it is unique by address. - pub reverts: Vec>, - /// The size of the plain state in the bundle state - pub state_size: usize, - /// The size of reverts in the bundle state - pub reverts_size: usize, +struct AccountRevertSorted { + pub account: AccountInfoRevert, + pub storage: BTreeMap, + pub previous_status: AccountStatus, + pub wipe_storage: bool, } -impl BundleStateSorted { - fn from_bundle_state(bundle_state: &BundleState) -> Self { - let state = bundle_state +/// Converts bundle state to sorted format for deterministic comparison +fn sort_bundle_state_for_comparison(bundle_state: &BundleState) -> BundleStateSorted { + BundleStateSorted { + state: bundle_state .state - .clone() - .into_iter() - .map(|(address, account)| { + .iter() + .map(|(addr, acc)| { ( - address, + *addr, BundleAccountSorted { - info: account.info, - original_info: account.original_info, - status: account.status, - storage: BTreeMap::from_iter(account.storage), + info: acc.info.clone(), + original_info: acc.original_info.clone(), + storage: BTreeMap::from_iter(acc.storage.clone()), + status: acc.status, }, ) }) - .collect(); - - let contracts = BTreeMap::from_iter(bundle_state.contracts.clone()); - - let reverts = bundle_state + .collect(), + contracts: BTreeMap::from_iter(bundle_state.contracts.clone()), + reverts: bundle_state .reverts .iter() .map(|block| { block .iter() - .map(|(address, account_revert)| { + .map(|(addr, rev)| { ( - *address, + *addr, AccountRevertSorted { - account: account_revert.account.clone(), - previous_status: account_revert.previous_status, - wipe_storage: account_revert.wipe_storage, - storage: BTreeMap::from_iter(account_revert.storage.clone()), + account: rev.account.clone(), + storage: BTreeMap::from_iter(rev.storage.clone()), + previous_status: rev.previous_status, + wipe_storage: rev.wipe_storage, }, ) }) .collect() }) - .collect(); + .collect(), + state_size: bundle_state.state_size, + reverts_size: bundle_state.reverts_size, + } +} + +/// Extracts execution data including codes, preimages, and hashed state from database +fn collect_execution_data( + mut db: State>>, +) -> eyre::Result { + let bundle_state = db.take_bundle(); + let mut codes = BTreeMap::new(); + let mut preimages = BTreeMap::new(); + let mut hashed_state = db.database.hashed_post_state(&bundle_state); + + // Collect codes + 
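// Both sources are needed here: the cache state does not contain contracts that were
// created within the block itself; those exist only in the bundle state.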
db.cache.contracts.values().chain(bundle_state.contracts.values()).for_each(|code| { + let code_bytes = code.original_bytes(); + codes.insert(keccak256(&code_bytes), code_bytes); + }); - let state_size = bundle_state.state_size; - let reverts_size = bundle_state.reverts_size; + // Collect preimages + for (address, account) in db.cache.accounts { + let hashed_address = keccak256(address); + hashed_state + .accounts + .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); - Self { state, contracts, reverts, state_size, reverts_size } + if let Some(account_data) = account.account { + preimages.insert(hashed_address, alloy_rlp::encode(address).into()); + let storage = hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + + for (slot, value) in account_data.storage { + let slot_bytes = B256::from(slot); + let hashed_slot = keccak256(slot_bytes); + storage.storage.insert(hashed_slot, value); + preimages.insert(hashed_slot, alloy_rlp::encode(slot_bytes).into()); + } + } } + + Ok((codes, preimages, hashed_state, bundle_state)) } -/// Generates a witness for the given block and saves it to a file. +/// Generates execution witness from collected codes, preimages, and hashed state +fn generate( + codes: BTreeMap, + preimages: BTreeMap, + hashed_state: reth_trie::HashedPostState, + state_provider: Box, +) -> eyre::Result { + let state = state_provider.witness(Default::default(), hashed_state)?; + Ok(ExecutionWitness { + state, + codes: codes.into_values().collect(), + keys: preimages.into_values().collect(), + ..Default::default() + }) +} + +/// Hook for generating execution witnesses when invalid blocks are detected. +/// +/// This hook captures the execution state and generates witness data that can be used +/// for debugging and analysis of invalid block execution. #[derive(Debug)] pub struct InvalidBlockWitnessHook { /// The provider to read the historical state and do the EVM execution. @@ -139,103 +204,51 @@ where E: ConfigureEvm + 'static, N: NodePrimitives, { - fn on_invalid_block( + /// Re-executes the block and collects execution data + fn re_execute_block( &self, parent_header: &SealedHeader, block: &RecoveredBlock, - output: &BlockExecutionOutput, - trie_updates: Option<(&TrieUpdates, B256)>, - ) -> eyre::Result<()> { - // TODO(alexey): unify with `DebugApi::debug_execution_witness` - + ) -> eyre::Result<(ExecutionWitness, BundleState)> { let mut executor = self.evm_config.batch_executor(StateProviderDatabase::new( self.provider.state_by_block_hash(parent_header.hash())?, )); executor.execute_one(block)?; + let db = executor.into_state(); + let (codes, preimages, hashed_state, bundle_state) = collect_execution_data(db)?; - // Take the bundle state - let mut db = executor.into_state(); - let bundle_state = db.take_bundle(); - - // Initialize a map of preimages. - let mut state_preimages = Vec::default(); - - // Get codes - let codes = db - .cache - .contracts - .values() - .map(|code| code.original_bytes()) - .chain( - // cache state does not have all the contracts, especially when - // a contract is created within the block - // the contract only exists in bundle state, therefore we need - // to include them as well - bundle_state.contracts.values().map(|code| code.original_bytes()), - ) - .collect(); - - // Grab all account proofs for the data accessed during block execution. 
- // - // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes - // referenced accounts + storage slots. - let mut hashed_state = db.database.hashed_post_state(&bundle_state); - for (address, account) in db.cache.accounts { - let hashed_address = keccak256(address); - hashed_state - .accounts - .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let witness = generate(codes, preimages, hashed_state, state_provider)?; - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); - - if let Some(account) = account.account { - state_preimages.push(alloy_rlp::encode(address).into()); - - for (slot, value) in account.storage { - let slot = B256::from(slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, value); + Ok((witness, bundle_state)) + } - state_preimages.push(alloy_rlp::encode(slot).into()); - } - } - } + /// Handles witness generation, saving, and comparison with healthy node + fn handle_witness_operations( + &self, + witness: &ExecutionWitness, + block_prefix: &str, + block_number: u64, + ) -> eyre::Result<()> { + let filename = format!("{}.witness.re_executed.json", block_prefix); + let re_executed_witness_path = self.save_file(filename, witness)?; - // Generate an execution witness for the aggregated state of accessed accounts. - // Destruct the cache database to retrieve the state provider. - let state_provider = db.database.into_inner(); - let state = state_provider.witness(Default::default(), hashed_state.clone())?; - - // Write the witness to the output directory. - let response = - ExecutionWitness { state, codes, keys: state_preimages, ..Default::default() }; - let re_executed_witness_path = self.save_file( - format!("{}_{}.witness.re_executed.json", block.number(), block.hash()), - &response, - )?; if let Some(healthy_node_client) = &self.healthy_node_client { - // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { DebugApiClient::<()>::debug_execution_witness( healthy_node_client, - block.number().into(), + block_number.into(), ) .await })?; - let healthy_path = self.save_file( - format!("{}_{}.witness.healthy.json", block.number(), block.hash()), - &healthy_node_witness, - )?; + let filename = format!("{}.witness.healthy.json", block_prefix); + let healthy_path = self.save_file(filename, &healthy_node_witness)?; - // If the witnesses are different, write the diff to the output directory. - if response != healthy_node_witness { - let filename = format!("{}_{}.witness.diff", block.number(), block.hash()); - let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?; + if witness != &healthy_node_witness { + let filename = format!("{}.witness.diff", block_prefix); + let diff_path = self.save_diff(filename, witness, &healthy_node_witness)?; warn!( target: "engine::invalid_block_hooks::witness", diff_path = %diff_path.display(), @@ -245,29 +258,26 @@ where ); } } + Ok(()) + } - // The bundle state after re-execution should match the original one. - // - // Reverts now supports order-independent equality, so we can compare directly without - // sorting the reverts vectors. 
- // - // See: https://github.com/bluealloy/revm/pull/1827 - if bundle_state != output.state { - let original_path = self.save_file( - format!("{}_{}.bundle_state.original.json", block.number(), block.hash()), - &output.state, - )?; - let re_executed_path = self.save_file( - format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()), - &bundle_state, - )?; - - let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); - // Convert bundle state to sorted struct which has BTreeMap instead of HashMap to - // have deterministic ordering - let bundle_state_sorted = BundleStateSorted::from_bundle_state(&bundle_state); - let output_state_sorted = BundleStateSorted::from_bundle_state(&output.state); + /// Validates that the bundle state after re-execution matches the original + fn validate_bundle_state( + &self, + re_executed_state: &BundleState, + original_state: &BundleState, + block_prefix: &str, + ) -> eyre::Result<()> { + if re_executed_state != original_state { + let original_filename = format!("{}.bundle_state.original.json", block_prefix); + let original_path = self.save_file(original_filename, original_state)?; + let re_executed_filename = format!("{}.bundle_state.re_executed.json", block_prefix); + let re_executed_path = self.save_file(re_executed_filename, re_executed_state)?; + // Convert bundle state to sorted format for deterministic comparison + let bundle_state_sorted = sort_bundle_state_for_comparison(re_executed_state); + let output_state_sorted = sort_bundle_state_for_comparison(original_state); + let filename = format!("{}.bundle_state.diff", block_prefix); let diff_path = self.save_diff(filename, &bundle_state_sorted, &output_state_sorted)?; warn!( @@ -278,37 +288,44 @@ where "Bundle state mismatch after re-execution" ); } + Ok(()) + } - // Calculate the state root and trie updates after re-execution. They should match - // the original ones. + /// Validates state root and trie updates after re-execution + fn validate_state_root_and_trie( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + bundle_state: &BundleState, + trie_updates: Option<(&TrieUpdates, B256)>, + block_prefix: &str, + ) -> eyre::Result<()> { + let state_provider = self.provider.state_by_block_hash(parent_header.hash())?; + let hashed_state = state_provider.hashed_post_state(bundle_state); let (re_executed_root, trie_output) = state_provider.state_root_with_updates(hashed_state)?; + if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { - let filename = format!("{}_{}.state_root.diff", block.number(), block.hash()); + let filename = format!("{}.state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); } - // If the re-executed state root does not match the _header_ state root, also log that. 
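// Unlike the comparison above, which checks the root computed from the supplied trie
// updates, this compares against the state root the block header itself committed to.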
if re_executed_root != block.state_root() { - let filename = - format!("{}_{}.header_state_root.diff", block.number(), block.hash()); + let filename = format!("{}.header_state_root.diff", block_prefix); let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); } if &trie_output != original_updates { - // Trie updates are too big to diff, so we just save the original and re-executed - let trie_output_sorted = &trie_output.into_sorted_ref(); - let original_updates_sorted = &original_updates.into_sorted_ref(); let original_path = self.save_file( - format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), - original_updates_sorted, + format!("{}.trie_updates.original.json", block_prefix), + &original_updates.into_sorted_ref(), )?; let re_executed_path = self.save_file( - format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), - trie_output_sorted, + format!("{}.trie_updates.re_executed.json", block_prefix), + &trie_output.into_sorted_ref(), )?; warn!( target: "engine::invalid_block_hooks::witness", @@ -318,11 +335,44 @@ where ); } } + Ok(()) + } + + fn on_invalid_block( + &self, + parent_header: &SealedHeader, + block: &RecoveredBlock, + output: &BlockExecutionOutput, + trie_updates: Option<(&TrieUpdates, B256)>, + ) -> eyre::Result<()> { + // TODO(alexey): unify with `DebugApi::debug_execution_witness` + let (witness, bundle_state) = self.re_execute_block(parent_header, block)?; + + let block_prefix = format!("{}_{}", block.number(), block.hash()); + self.handle_witness_operations(&witness, &block_prefix, block.number())?; + + self.validate_bundle_state(&bundle_state, &output.state, &block_prefix)?; + + self.validate_state_root_and_trie( + parent_header, + block, + &bundle_state, + trie_updates, + &block_prefix, + )?; Ok(()) } - /// Saves the diff of two values into a file with the given name in the output directory. 
+ /// Serializes and saves a value to a JSON file in the output directory + fn save_file(&self, filename: String, value: &T) -> eyre::Result { + let path = self.output_directory.join(filename); + File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?; + + Ok(path) + } + + /// Compares two values and saves their diff to a file in the output directory fn save_diff( &self, filename: String, @@ -335,13 +385,6 @@ where Ok(path) } - - fn save_file(&self, filename: String, value: &T) -> eyre::Result { - let path = self.output_directory.join(filename); - File::create(&path)?.write_all(serde_json::to_string(value)?.as_bytes())?; - - Ok(path) - } } impl InvalidBlockHook for InvalidBlockWitnessHook @@ -361,3 +404,655 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip7685::Requests; + use alloy_primitives::{map::HashMap, Address, Bytes, B256, U256}; + use reth_chainspec::ChainSpec; + use reth_ethereum_primitives::EthPrimitives; + use reth_evm_ethereum::EthEvmConfig; + use reth_provider::test_utils::MockEthProvider; + use reth_revm::db::{BundleAccount, BundleState}; + use revm_database::states::reverts::AccountRevert; + use tempfile::TempDir; + + use reth_revm::test_utils::StateProviderTest; + use reth_testing_utils::generators::{self, random_block, random_eoa_accounts, BlockParams}; + use revm_bytecode::Bytecode; + + /// Creates a test `BundleState` with realistic accounts, contracts, and reverts + fn create_bundle_state() -> BundleState { + let mut rng = generators::rng(); + let mut bundle_state = BundleState::default(); + + // Generate realistic EOA accounts using generators + let accounts = random_eoa_accounts(&mut rng, 3); + + for (i, (addr, account)) in accounts.into_iter().enumerate() { + // Create storage entries for each account + let mut storage = HashMap::default(); + let storage_key = U256::from(i + 1); + storage.insert( + storage_key, + StorageSlot { + present_value: U256::from((i + 1) * 10), + previous_or_original_value: U256::from((i + 1) * 15), + }, + ); + + let bundle_account = BundleAccount { + info: Some(AccountInfo { + balance: account.balance, + nonce: account.nonce, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + original_info: (i == 0).then(|| AccountInfo { + balance: account.balance.checked_div(U256::from(2)).unwrap_or(U256::ZERO), + nonce: 0, + code_hash: account.bytecode_hash.unwrap_or_default(), + code: None, + }), + storage, + status: AccountStatus::default(), + }; + + bundle_state.state.insert(addr, bundle_account); + } + + // Generate realistic contract bytecode using generators + let contract_hashes: Vec = (0..3).map(|_| B256::random()).collect(); + for (i, hash) in contract_hashes.iter().enumerate() { + let bytecode = match i { + 0 => Bytes::from(vec![0x60, 0x80, 0x60, 0x40, 0x52]), // Simple contract + 1 => Bytes::from(vec![0x61, 0x81, 0x60, 0x00, 0x39]), // Another contract + _ => Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd]), // REVERT contract + }; + bundle_state.contracts.insert(*hash, Bytecode::new_raw(bytecode)); + } + + // Add reverts for multiple blocks using different accounts + let addresses: Vec
= bundle_state.state.keys().copied().collect(); + for (i, addr) in addresses.iter().take(2).enumerate() { + let revert = AccountRevert { + wipe_storage: i == 0, // First account has storage wiped + ..AccountRevert::default() + }; + bundle_state.reverts.push(vec![(*addr, revert)]); + } + + // Set realistic sizes + bundle_state.state_size = bundle_state.state.len(); + bundle_state.reverts_size = bundle_state.reverts.len(); + + bundle_state + } + #[test] + fn test_sort_bundle_state_for_comparison() { + // Use the fixture function to create test data + let bundle_state = create_bundle_state(); + + // Call the function under test + let sorted = sort_bundle_state_for_comparison(&bundle_state); + + // Verify state_size and reverts_size values match the fixture + assert_eq!(sorted.state_size, 3); + assert_eq!(sorted.reverts_size, 2); + + // Verify state contains our mock accounts + assert_eq!(sorted.state.len(), 3); // We added 3 accounts + + // Verify contracts contains our mock contracts + assert_eq!(sorted.contracts.len(), 3); // We added 3 contracts + + // Verify reverts is an array with multiple blocks of reverts + let reverts = &sorted.reverts; + assert_eq!(reverts.len(), 2); // Fixture has two blocks of reverts + + // Verify that the state accounts have the expected structure + for account_data in sorted.state.values() { + // BundleAccountSorted has info, original_info, storage, and status fields + // Just verify the structure exists by accessing the fields + let _info = &account_data.info; + let _original_info = &account_data.original_info; + let _storage = &account_data.storage; + let _status = &account_data.status; + } + } + + #[test] + fn test_data_collector_collect() { + // Create test data using the fixture function + let bundle_state = create_bundle_state(); + + // Create a State with StateProviderTest + let state_provider = StateProviderTest::default(); + let mut state = State::builder() + .with_database(StateProviderDatabase::new( + Box::new(state_provider) as Box + )) + .with_bundle_update() + .build(); + + // Insert contracts from the fixture into the state cache + for (code_hash, bytecode) in &bundle_state.contracts { + state.cache.contracts.insert(*code_hash, bytecode.clone()); + } + + // Manually set the bundle state in the state object + state.bundle_state = bundle_state; + + // Call the collect function + let result = collect_execution_data(state); + // Verify the function returns successfully + assert!(result.is_ok()); + + let (codes, _preimages, _hashed_state, returned_bundle_state) = result.unwrap(); + + // Verify that the returned data contains expected values + // Since we used the fixture data, we should have some codes and state + assert!(!codes.is_empty(), "Expected some bytecode entries"); + assert!(!returned_bundle_state.state.is_empty(), "Expected some state entries"); + + // Verify the bundle state structure matches our fixture + assert_eq!(returned_bundle_state.state.len(), 3, "Expected 3 accounts from fixture"); + assert_eq!(returned_bundle_state.contracts.len(), 3, "Expected 3 contracts from fixture"); + } + + #[test] + fn test_re_execute_block() { + // Create hook instance + let (hook, _output_directory, _temp_dir) = create_test_hook(); + + // Setup to call re_execute_block + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + + // Create a random block that inherits from the parent header + let recovered_block = random_block( + &mut rng, + 2, // block number + BlockParams { + parent: 
Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let result = hook.re_execute_block(&parent_header, &recovered_block); + + // Verify the function behavior with mock data + assert!(result.is_ok(), "re_execute_block should return Ok"); + } + + /// Creates test `InvalidBlockWitnessHook` with temporary directory + fn create_test_hook() -> ( + InvalidBlockWitnessHook, EthEvmConfig>, + PathBuf, + TempDir, + ) { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let output_directory = temp_dir.path().to_path_buf(); + + let provider = MockEthProvider::::default(); + let evm_config = EthEvmConfig::mainnet(); + + let hook = + InvalidBlockWitnessHook::new(provider, evm_config, output_directory.clone(), None); + + (hook, output_directory, temp_dir) + } + + #[test] + fn test_handle_witness_operations_with_healthy_client_mock() { + // Create hook instance with mock healthy client + let (hook, output_directory, _temp_dir) = create_test_hook(); + + // Create sample ExecutionWitness with correct types + let witness = ExecutionWitness { + state: vec![Bytes::from("state_data")], + codes: vec![Bytes::from("code_data")], + keys: vec![Bytes::from("key_data")], + ..Default::default() + }; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, "test_block_healthy", 67890); + + // Should succeed + assert!(result.is_ok()); + + // Check that witness file was created + let witness_file = output_directory.join("test_block_healthy.witness.re_executed.json"); + assert!(witness_file.exists()); + } + + #[test] + fn test_handle_witness_operations_file_creation() { + // Test file creation and content validation + let (hook, output_directory, _temp_dir) = create_test_hook(); + + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + + let block_prefix = "file_test_block"; + let block_number = 11111; + + // Call handle_witness_operations + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + + // Verify file was created with correct name + let expected_file = + output_directory.join(format!("{}.witness.re_executed.json", block_prefix)); + assert!(expected_file.exists()); + + // Read and verify file content is valid JSON and contains witness structure + let file_content = std::fs::read_to_string(&expected_file).expect("Failed to read file"); + let parsed_witness: serde_json::Value = + serde_json::from_str(&file_content).expect("File should contain valid JSON"); + + // Verify the JSON structure contains expected fields + assert!(parsed_witness.get("state").is_some(), "JSON should contain 'state' field"); + assert!(parsed_witness.get("codes").is_some(), "JSON should contain 'codes' field"); + assert!(parsed_witness.get("keys").is_some(), "JSON should contain 'keys' field"); + } + + #[test] + fn test_proof_generator_generate() { + // Use existing MockEthProvider + let mock_provider = MockEthProvider::default(); + let state_provider: Box = Box::new(mock_provider); + + // Mock Data + let mut codes = BTreeMap::new(); + codes.insert(B256::from([1u8; 32]), Bytes::from("contract_code_1")); + codes.insert(B256::from([2u8; 32]), Bytes::from("contract_code_2")); + + let mut preimages = BTreeMap::new(); + preimages.insert(B256::from([3u8; 32]), Bytes::from("preimage_1")); + preimages.insert(B256::from([4u8; 32]), 
Bytes::from("preimage_2")); + + let hashed_state = reth_trie::HashedPostState::default(); + + // Call generate function + let result = generate(codes.clone(), preimages.clone(), hashed_state, state_provider); + + // Verify result + assert!(result.is_ok(), "generate function should succeed"); + let execution_witness = result.unwrap(); + + assert!(execution_witness.state.is_empty(), "State should be empty from MockEthProvider"); + + let expected_codes: Vec = codes.into_values().collect(); + assert_eq!( + execution_witness.codes.len(), + expected_codes.len(), + "Codes length should match" + ); + for code in &expected_codes { + assert!( + execution_witness.codes.contains(code), + "Codes should contain expected bytecode" + ); + } + + let expected_keys: Vec = preimages.into_values().collect(); + assert_eq!(execution_witness.keys.len(), expected_keys.len(), "Keys length should match"); + for key in &expected_keys { + assert!(execution_witness.keys.contains(key), "Keys should contain expected preimage"); + } + } + + #[test] + fn test_validate_bundle_state_matching() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + let block_prefix = "test_block_123"; + + // Test with identical states - should not produce any warnings or files + let result = hook.validate_bundle_state(&bundle_state, &bundle_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_mismatch() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let original_state = create_bundle_state(); + let mut modified_state = create_bundle_state(); + + // Modify the state to create a mismatch + let addr = Address::from([1u8; 20]); + if let Some(account) = modified_state.state.get_mut(&addr) && + let Some(ref mut info) = account.info + { + info.balance = U256::from(999); + } + + let block_prefix = "test_block_mismatch"; + + // Test with different states - should save files and log warning + let result = hook.validate_bundle_state(&modified_state, &original_state, block_prefix); + assert!(result.is_ok()); + + // Verify that files were created + let original_file = output_dir.join(format!("{}.bundle_state.original.json", block_prefix)); + let re_executed_file = + output_dir.join(format!("{}.bundle_state.re_executed.json", block_prefix)); + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + + assert!(original_file.exists(), "Original bundle state file should be created"); + assert!(re_executed_file.exists(), "Re-executed bundle state file should be created"); + assert!(diff_file.exists(), "Diff file should be created"); + } + + /// Creates test `TrieUpdates` with account nodes and removed nodes + fn create_test_trie_updates() -> TrieUpdates { + use alloy_primitives::map::HashMap; + use reth_trie::{updates::TrieUpdates, BranchNodeCompact, Nibbles}; + use std::collections::HashSet; + + let mut account_nodes = HashMap::default(); + let nibbles = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3]); + let branch_node = BranchNodeCompact::new( + 0b1010, // state_mask + 0b1010, // tree_mask - must be subset of state_mask + 0b1000, // hash_mask + vec![B256::from([1u8; 32])], // hashes + None, // root_hash + ); + account_nodes.insert(nibbles, branch_node); + + let mut removed_nodes = HashSet::default(); + removed_nodes.insert(Nibbles::from_nibbles_unchecked([0x4, 0x5, 0x6])); + + TrieUpdates { account_nodes, removed_nodes, storage_tries: HashMap::default() } + } + + #[test] + fn 
test_validate_state_root_and_trie_with_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let trie_updates = create_test_trie_updates(); + let original_root = B256::from([2u8; 32]); // Different from what will be computed + let block_prefix = "test_state_root_with_trie"; + + // Test with trie updates - this will likely produce warnings due to mock data + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + Some((&trie_updates, original_root)), + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_on_invalid_block_calls_all_validation_methods() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + // Generate test data + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + // Create mock BlockExecutionOutput + let output = BlockExecutionOutput { + state: bundle_state, + result: reth_provider::BlockExecutionResult { + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + blob_gas_used: 0, + }, + }; + + // Create test trie updates + let trie_updates = create_test_trie_updates(); + let state_root = B256::random(); + + // Test that on_invalid_block attempts to call all its internal methods + // by checking that it doesn't panic and tries to create files + let files_before = output_dir.read_dir().unwrap().count(); + + let _result = hook.on_invalid_block( + &parent_header, + &recovered_block, + &output, + Some((&trie_updates, state_root)), + ); + + // Verify that the function attempted to process the block: + // Either it succeeded, or it created some output files during processing + let files_after = output_dir.read_dir().unwrap().count(); + + // The function should attempt to execute its workflow + assert!( + files_after >= files_before, + "on_invalid_block should attempt to create output files during processing" + ); + } + + #[test] + fn test_handle_witness_operations_with_empty_witness() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness::default(); + let block_prefix = "empty_witness_test"; + let block_number = 12345; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_zero_block_number() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let witness = ExecutionWitness { + state: vec![Bytes::from("test_state")], + codes: vec![Bytes::from("test_code")], + keys: vec![Bytes::from("test_key")], + ..Default::default() + }; + let block_prefix = "zero_block_test"; + let block_number = 0; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_handle_witness_operations_with_large_witness_data() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let large_data = vec![0u8; 
10000]; // 10KB of data + let witness = ExecutionWitness { + state: vec![Bytes::from(large_data.clone())], + codes: vec![Bytes::from(large_data.clone())], + keys: vec![Bytes::from(large_data)], + ..Default::default() + }; + let block_prefix = "large_witness_test"; + let block_number = 999999; + + let result = hook.handle_witness_operations(&witness, block_prefix, block_number); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_empty_states() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let empty_state = BundleState::default(); + let block_prefix = "empty_states_test"; + + let result = hook.validate_bundle_state(&empty_state, &empty_state, block_prefix); + assert!(result.is_ok()); + } + + #[test] + fn test_validate_bundle_state_with_different_contract_counts() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let state1 = create_bundle_state(); + let mut state2 = create_bundle_state(); + + // Add extra contract to state2 + let extra_contract_hash = B256::random(); + state2.contracts.insert( + extra_contract_hash, + Bytecode::new_raw(Bytes::from(vec![0x60, 0x00, 0x60, 0x00, 0xfd])), // REVERT opcode + ); + + let block_prefix = "different_contracts_test"; + let result = hook.validate_bundle_state(&state1, &state2, block_prefix); + assert!(result.is_ok()); + + // Verify diff files were created + let diff_file = output_dir.join(format!("{}.bundle_state.diff", block_prefix)); + assert!(diff_file.exists()); + } + + #[test] + fn test_save_diff_with_identical_values() { + let (hook, output_dir, _temp_dir) = create_test_hook(); + let value1 = "identical_value"; + let value2 = "identical_value"; + let filename = "identical_diff_test".to_string(); + + let result = hook.save_diff(filename.clone(), &value1, &value2); + assert!(result.is_ok()); + + let diff_file = output_dir.join(filename); + assert!(diff_file.exists()); + } + + #[test] + fn test_validate_state_root_and_trie_without_trie_updates() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let bundle_state = create_bundle_state(); + + let mut rng = generators::rng(); + let parent_header = generators::random_header(&mut rng, 1, None); + let recovered_block = random_block( + &mut rng, + 2, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(0), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let block_prefix = "no_trie_updates_test"; + + // Test without trie updates (None case) + let result = hook.validate_state_root_and_trie( + &parent_header, + &recovered_block, + &bundle_state, + None, + block_prefix, + ); + assert!(result.is_ok()); + } + + #[test] + fn test_complete_invalid_block_workflow() { + let (hook, _output_dir, _temp_dir) = create_test_hook(); + let mut rng = generators::rng(); + + // Create a realistic block scenario + let parent_header = generators::random_header(&mut rng, 100, None); + let invalid_block = random_block( + &mut rng, + 101, + BlockParams { + parent: Some(parent_header.hash()), + tx_count: Some(3), + ..Default::default() + }, + ) + .try_recover() + .unwrap(); + + let bundle_state = create_bundle_state(); + let trie_updates = create_test_trie_updates(); + + // Test validation methods + let validation_result = + hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_test"); + assert!(validation_result.is_ok(), "Bundle state validation should succeed"); + + let state_root_result = hook.validate_state_root_and_trie( + &parent_header, + &invalid_block, + &bundle_state, + Some((&trie_updates, 
B256::random())),
+            "integration_test",
+        );
+        assert!(state_root_result.is_ok(), "State root validation should succeed");
+    }
+
+    #[test]
+    fn test_integration_workflow_components() {
+        let (hook, _output_dir, _temp_dir) = create_test_hook();
+        let mut rng = generators::rng();
+
+        // Create test data
+        let parent_header = generators::random_header(&mut rng, 50, None);
+        let _invalid_block = random_block(
+            &mut rng,
+            51,
+            BlockParams {
+                parent: Some(parent_header.hash()),
+                tx_count: Some(2),
+                ..Default::default()
+            },
+        )
+        .try_recover()
+        .unwrap();
+
+        let bundle_state = create_bundle_state();
+        let _trie_updates = create_test_trie_updates();
+
+        // Test individual components that would be part of the complete flow
+        let validation_result =
+            hook.validate_bundle_state(&bundle_state, &bundle_state, "integration_component_test");
+        assert!(validation_result.is_ok(), "Component validation should succeed");
+    }
+}
diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs
index e5f58523d03..9e2c8210f08 100644
--- a/crates/engine/primitives/src/config.rs
+++ b/crates/engine/primitives/src/config.rs
@@ -6,9 +6,32 @@ pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2;
 /// How close to the canonical head we persist blocks.
 pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 0;
 
-/// Default maximum concurrency for proof tasks
+/// Default maximum concurrency for on-demand proof tasks (blinded nodes)
 pub const DEFAULT_MAX_PROOF_TASK_CONCURRENCY: u64 = 256;
 
+/// Lower bound enforced on explicitly configured worker counts; the derived defaults are also
+/// capped at this value so that machines with many cores do not spawn excessive worker threads.
+pub const MIN_WORKER_COUNT: usize = 32;
+
+/// Returns the default number of storage worker threads based on available parallelism,
+/// capped at [`MIN_WORKER_COUNT`].
+fn default_storage_worker_count() -> usize {
+    #[cfg(feature = "std")]
+    {
+        std::thread::available_parallelism().map_or(8, |n| n.get() * 2).min(MIN_WORKER_COUNT)
+    }
+    #[cfg(not(feature = "std"))]
+    {
+        8
+    }
+}
+
+/// Returns the default number of account worker threads.
+///
+/// Account workers coordinate storage proof collection and account trie traversal.
+/// They are set to the same count as storage workers for simplicity.
+fn default_account_worker_count() -> usize {
+    default_storage_worker_count()
+}
+
 /// The size of proof targets chunk to spawn in one multiproof calculation.
 pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 10;
 
@@ -109,6 +132,10 @@ pub struct TreeConfig {
     prewarm_max_concurrency: usize,
     /// Whether to unwind canonical header to ancestor during forkchoice updates.
     allow_unwind_canonical_header: bool,
+    /// Number of storage proof worker threads.
+    storage_worker_count: usize,
+    /// Number of account proof worker threads.
+    account_worker_count: usize,
 }
 
 impl Default for TreeConfig {
@@ -135,6 +162,8 @@ impl Default for TreeConfig {
             always_process_payload_attributes_on_canonical_head: false,
             prewarm_max_concurrency: DEFAULT_PREWARM_MAX_CONCURRENCY,
             allow_unwind_canonical_header: false,
+            storage_worker_count: default_storage_worker_count(),
+            account_worker_count: default_account_worker_count(),
         }
     }
 }
@@ -164,7 +193,10 @@ impl TreeConfig {
         always_process_payload_attributes_on_canonical_head: bool,
         prewarm_max_concurrency: usize,
         allow_unwind_canonical_header: bool,
+        storage_worker_count: usize,
+        account_worker_count: usize,
     ) -> Self {
+        assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1");
         Self {
             persistence_threshold,
             memory_block_buffer_target,
@@ -187,6 +219,8 @@ impl TreeConfig {
             always_process_payload_attributes_on_canonical_head,
             prewarm_max_concurrency,
             allow_unwind_canonical_header,
+            storage_worker_count,
+            account_worker_count,
         }
     }
 
@@ -394,6 +428,7 @@ impl TreeConfig {
         mut self,
         max_proof_task_concurrency: u64,
     ) -> Self {
+        assert!(max_proof_task_concurrency > 0, "max_proof_task_concurrency must be at least 1");
         self.max_proof_task_concurrency = max_proof_task_concurrency;
         self
     }
@@ -452,4 +487,26 @@ impl TreeConfig {
     pub const fn prewarm_max_concurrency(&self) -> usize {
         self.prewarm_max_concurrency
     }
+
+    /// Return the number of storage proof worker threads.
+    pub const fn storage_worker_count(&self) -> usize {
+        self.storage_worker_count
+    }
+
+    /// Setter for the number of storage proof worker threads.
+    pub fn with_storage_worker_count(mut self, storage_worker_count: usize) -> Self {
+        self.storage_worker_count = storage_worker_count.max(MIN_WORKER_COUNT);
+        self
+    }
+
+    /// Return the number of account proof worker threads.
+    pub const fn account_worker_count(&self) -> usize {
+        self.account_worker_count
+    }
+
+    /// Setter for the number of account proof worker threads.
+    pub fn with_account_worker_count(mut self, account_worker_count: usize) -> Self {
+        self.account_worker_count = account_worker_count.max(MIN_WORKER_COUNT);
+        self
+    }
 }
diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs
index 0e7b153106f..91a12138755 100644
--- a/crates/engine/tree/benches/state_root_task.rs
+++ b/crates/engine/tree/benches/state_root_task.rs
@@ -230,16 +230,22 @@ fn bench_state_root(c: &mut Criterion) {
                 },
                 |(genesis_hash, mut payload_processor, provider, state_updates)| {
                     black_box({
-                        let mut handle = payload_processor.spawn(
-                            Default::default(),
-                            core::iter::empty::<
-                                Result<Recovered<TransactionSigned>, core::convert::Infallible>,
-                            >(),
-                            StateProviderBuilder::new(provider.clone(), genesis_hash, None),
-                            ConsistentDbView::new_with_latest_tip(provider).unwrap(),
-                            TrieInput::default(),
-                            &TreeConfig::default(),
-                        );
+                        let mut handle = payload_processor
+                            .spawn(
+                                Default::default(),
+                                core::iter::empty::<
+                                    Result<
+                                        Recovered<TransactionSigned>,
+                                        core::convert::Infallible,
+                                    >,
+                                >(),
+                                StateProviderBuilder::new(provider.clone(), genesis_hash, None),
+                                ConsistentDbView::new_with_latest_tip(provider).unwrap(),
+                                TrieInput::default(),
+                                &TreeConfig::default(),
+                            )
+                            .map_err(|(err, ..)| err)
+                            .expect("failed to spawn payload processor");
 
                         let mut state_hook = handle.state_hook();
diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs
index 2ec00f9b918..e011a54b73c 100644
--- a/crates/engine/tree/src/test_utils.rs
+++ b/crates/engine/tree/src/test_utils.rs
@@ -3,9 +3,8 @@ use reth_chainspec::ChainSpec;
 use reth_ethereum_primitives::BlockBody;
 use reth_network_p2p::test_utils::TestFullBlockClient;
 use reth_primitives_traits::SealedHeader;
-use reth_provider::{
-    test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
-    ExecutionOutcome,
+use reth_provider::test_utils::{
+    create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB,
 };
 use reth_prune_types::PruneModes;
 use reth_stages::{test_utils::TestStages, ExecOutput, StageError};
@@ -18,13 +17,12 @@ use tokio::sync::watch;
 #[derive(Default, Debug)]
 pub struct TestPipelineBuilder {
     pipeline_exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
-    executor_results: Vec<ExecutionOutcome>,
 }
 
 impl TestPipelineBuilder {
     /// Create a new [`TestPipelineBuilder`].
     pub const fn new() -> Self {
-        Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new() }
+        Self { pipeline_exec_outputs: VecDeque::new() }
     }
 
     /// Set the pipeline execution outputs to use for the test consensus engine.
@@ -37,8 +35,14 @@ impl TestPipelineBuilder {
     }
 
     /// Set the executor results to use for the test consensus engine.
-    pub fn with_executor_results(mut self, executor_results: Vec<ExecutionOutcome>) -> Self {
-        self.executor_results = executor_results;
+    #[deprecated(
+        note = "no-op: executor results are not used and will be removed in a future release"
+    )]
+    pub fn with_executor_results(
+        self,
+        executor_results: Vec<reth_provider::ExecutionOutcome>,
+    ) -> Self {
+        let _ = executor_results;
         self
     }
diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs
index 9f4eb8398df..8553a9fe63c 100644
--- a/crates/engine/tree/src/tree/cached_state.rs
+++ b/crates/engine/tree/src/tree/cached_state.rs
@@ -1,5 +1,8 @@
 //! Execution cache implementation for block processing.
-use alloy_primitives::{Address, StorageKey, StorageValue, B256};
+use alloy_primitives::{
+    map::{DefaultHashBuilder, HashSet},
+    Address, StorageKey, StorageValue, B256,
+};
 use metrics::Gauge;
 use mini_moka::sync::CacheBuilder;
 use reth_errors::ProviderResult;
@@ -14,7 +17,6 @@ use reth_trie::{
     updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
     MultiProofTargets, StorageMultiProof, StorageProof, TrieInput,
 };
-use revm_primitives::map::DefaultHashBuilder;
 use std::{sync::Arc, time::Duration};
 use tracing::trace;
 
@@ -300,65 +302,69 @@ pub(crate) struct ExecutionCache {
     /// Cache for contract bytecode, keyed by code hash.
     code_cache: Cache<B256, Option<Bytecode>>,
 
-    /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account's
-    /// storage slots.
-    storage_cache: Cache<Address, AccountStorageCache>,
+    /// Flattened storage cache: composite key of (`Address`, `StorageKey`) maps directly to
+    /// values.
+    storage_cache: Cache<(Address, StorageKey), Option<StorageValue>>,
 
     /// Cache for basic account information (nonce, balance, code hash).
     account_cache: Cache<Address, Option<Account>>,
 }
 
 impl ExecutionCache {
-    /// Get storage value from hierarchical cache.
+    /// Get storage value from flattened cache.
     ///
     /// Returns a `SlotStatus` indicating whether:
-    /// - `NotCached`: The account's storage cache doesn't exist
-    /// - `Empty`: The slot exists in the account's cache but is empty
+    /// - `NotCached`: The storage slot is not in the cache
+    /// - `Empty`: The slot exists in the cache but is empty
    /// - `Value`: The slot exists and has a specific value
     pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus {
-        match self.storage_cache.get(address) {
+        match self.storage_cache.get(&(*address, *key)) {
             None => SlotStatus::NotCached,
-            Some(account_cache) => account_cache.get_storage(key),
+            Some(None) => SlotStatus::Empty,
+            Some(Some(value)) => SlotStatus::Value(value),
         }
     }
 
-    /// Insert storage value into hierarchical cache
+    /// Insert storage value into flattened cache
     pub(crate) fn insert_storage(
         &self,
         address: Address,
         key: StorageKey,
         value: Option<StorageValue>,
     ) {
-        self.insert_storage_bulk(address, [(key, value)]);
+        self.storage_cache.insert((address, key), value);
     }
 
-    /// Insert multiple storage values into hierarchical cache for a single account
+    /// Insert multiple storage values into flattened cache for a single account
     ///
-    /// This method is optimized for inserting multiple storage values for the same address
-    /// by doing the account cache lookup only once instead of for each key-value pair.
+    /// This method inserts multiple storage values for the same address directly
+    /// into the flattened cache.
     pub(crate) fn insert_storage_bulk<I>(&self, address: Address, storage_entries: I)
     where
         I: IntoIterator<Item = (StorageKey, Option<StorageValue>)>,
     {
-        let account_cache = self.storage_cache.get(&address).unwrap_or_else(|| {
-            let account_cache = AccountStorageCache::default();
-            self.storage_cache.insert(address, account_cache.clone());
-            account_cache
-        });
-
         for (key, value) in storage_entries {
-            account_cache.insert_storage(key, value);
+            self.storage_cache.insert((address, key), value);
         }
     }
 
-    /// Invalidate storage for specific account
-    pub(crate) fn invalidate_account_storage(&self, address: &Address) {
-        self.storage_cache.invalidate(address);
-    }
-
     /// Returns the total number of storage slots cached across all accounts
     pub(crate) fn total_storage_slots(&self) -> usize {
-        self.storage_cache.iter().map(|addr| addr.len()).sum()
+        self.storage_cache.entry_count() as usize
+    }
+
+    /// Invalidates the storage for all addresses in the set
+    pub(crate) fn invalidate_storages(&self, addresses: HashSet<&Address>) {
+        // NOTE: this must collect because the invalidate function should not be called while we
+        // hold an iter for it
+        let storage_entries = self
+            .storage_cache
+            .iter()
+            .filter_map(|entry| addresses.contains(&entry.key().0).then_some(*entry.key()))
+            .collect::<Vec<_>>();
+        for key in storage_entries {
+            self.storage_cache.invalidate(&key)
+        }
     }
 
     /// Inserts the post-execution state changes into the cache.
@@ -385,6 +391,7 @@ impl ExecutionCache {
             self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone())));
         }
 
+        let mut invalidated_accounts = HashSet::default();
         for (addr, account) in &state_updates.state {
             // If the account was not modified, as in not changed and not destroyed, then we have
             // nothing to do w.r.t. this particular account and can move on
@@ -397,7 +404,7 @@ impl ExecutionCache {
 
                 // Invalidate the account cache entry if destroyed
                 self.account_cache.invalidate(addr);
-                self.invalidate_account_storage(addr);
+                invalidated_accounts.insert(addr);
                 continue
             }
@@ -424,6 +431,9 @@ impl ExecutionCache {
             self.account_cache.insert(*addr, Some(Account::from(account_info)));
         }
 
+        // invalidate storage for all destroyed accounts
+        self.invalidate_storages(invalidated_accounts);
+
         Ok(())
     }
 }
@@ -452,11 +462,11 @@ impl ExecutionCacheBuilder {
         const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour
 
         let storage_cache = CacheBuilder::new(self.storage_cache_entries)
-            .weigher(|_key: &Address, value: &AccountStorageCache| -> u32 {
-                // values based on results from measure_storage_cache_overhead test
-                let base_weight = 39_000;
-                let slots_weight = value.len() * 218;
-                (base_weight + slots_weight) as u32
+            .weigher(|_key: &(Address, StorageKey), _value: &Option<StorageValue>| -> u32 {
+                // Size of composite key (Address + StorageKey) + Option<StorageValue>:
+                // Address: 20 bytes, StorageKey: 32 bytes, Option<StorageValue>: 33 bytes,
+                // plus some overhead for the hash map entry
+                120_u32
             })
             .max_capacity(storage_cache_size)
             .time_to_live(EXPIRY_TIME)
@@ -573,56 +583,6 @@ impl SavedCache {
     }
 }
 
-/// Cache for an individual account's storage slots.
-///
-/// This represents the second level of the hierarchical storage cache.
-/// Each account gets its own `AccountStorageCache` to store accessed storage slots.
-#[derive(Debug, Clone)]
-pub(crate) struct AccountStorageCache {
-    /// Map of storage keys to their cached values.
-    slots: Cache<StorageKey, Option<StorageValue>>,
-}
-
-impl AccountStorageCache {
-    /// Create a new [`AccountStorageCache`]
-    pub(crate) fn new(max_slots: u64) -> Self {
-        Self {
-            slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()),
-        }
-    }
-
-    /// Get a storage value from this account's cache.
-    /// - `NotCached`: The slot is not in the cache
-    /// - `Empty`: The slot is empty
-    /// - `Value`: The slot has a specific value
-    pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus {
-        match self.slots.get(key) {
-            None => SlotStatus::NotCached,
-            Some(None) => SlotStatus::Empty,
-            Some(Some(value)) => SlotStatus::Value(value),
-        }
-    }
-
-    /// Insert a storage value
-    pub(crate) fn insert_storage(&self, key: StorageKey, value: Option<StorageValue>) {
-        self.slots.insert(key, value);
-    }
-
-    /// Returns the number of slots in the cache
-    pub(crate) fn len(&self) -> usize {
-        self.slots.entry_count() as usize
-    }
-}
-
-impl Default for AccountStorageCache {
-    fn default() -> Self {
-        // With weigher and max_capacity in place, this number represents
-        // the maximum number of entries that can be stored, not the actual
-        // memory usage which is controlled by storage cache's max_capacity.
-        Self::new(1_000_000)
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -697,32 +657,36 @@ mod tests {
 
     #[test]
     fn measure_storage_cache_overhead() {
-        let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000));
-        println!("Base AccountStorageCache overhead: {base_overhead} bytes");
+        let (base_overhead, cache) =
+            measure_allocation(|| ExecutionCacheBuilder::default().build_caches(1000));
+        println!("Base ExecutionCache overhead: {base_overhead} bytes");
         let mut rng = rand::rng();
 
+        let address = Address::random();
         let key = StorageKey::random();
         let value = StorageValue::from(rng.random::<u128>());
         let (first_slot, _) = measure_allocation(|| {
-            cache.insert_storage(key, Some(value));
+            cache.insert_storage(address, key, Some(value));
         });
         println!("First slot insertion overhead: {first_slot} bytes");
 
         const TOTAL_SLOTS: usize = 10_000;
         let (test_slots, _) = measure_allocation(|| {
             for _ in 0..TOTAL_SLOTS {
+                let addr = Address::random();
                 let key = StorageKey::random();
                 let value = StorageValue::from(rng.random::<u128>());
-                cache.insert_storage(key, Some(value));
+                cache.insert_storage(addr, key, Some(value));
             }
         });
         println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS);
 
         println!("\nTheoretical sizes:");
+        println!("Address size: {} bytes", size_of::<Address>());
         println!("StorageKey size: {} bytes", size_of::<StorageKey>());
         println!("StorageValue size: {} bytes", size_of::<StorageValue>());
         println!("Option<StorageValue> size: {} bytes", size_of::<Option<StorageValue>>());
-        println!("Option<AccountStorageCache> size: {} bytes", size_of::<Option<AccountStorageCache>>());
+        println!("(Address, StorageKey) size: {} bytes", size_of::<(Address, StorageKey)>());
     }
 
     #[test]
diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs
index 4d3310543d1..c014d8ba15e 100644
--- a/crates/engine/tree/src/tree/metrics.rs
+++ b/crates/engine/tree/src/tree/metrics.rs
@@ -122,6 +122,10 @@ pub(crate) struct TreeMetrics {
     pub reorgs: Counter,
     /// The latest reorg depth
     pub latest_reorg_depth: Gauge,
+    /// The current safe block height (this is required by optimism)
+    pub safe_block_height: Gauge,
+    /// The current finalized block height (this is required by optimism)
+    pub finalized_block_height: Gauge,
 }
 
 /// Metrics for the `EngineApi`.
@@ -310,6 +314,7 @@ mod tests {
                 receipts: vec![],
                 requests: Requests::default(),
                 gas_used: 1000,
+                blob_gas_used: 0,
             },
         ))
     }
diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs
index 24bdc069f09..7f1183f5efc 100644
--- a/crates/engine/tree/src/tree/mod.rs
+++ b/crates/engine/tree/src/tree/mod.rs
@@ -1015,23 +1015,79 @@ where
         version: EngineApiMessageVersion,
     ) -> ProviderResult<TreeOutcome<OnForkChoiceUpdated>> {
         trace!(target: "engine::tree", ?attrs, "invoked forkchoice update");
+
+        // Record metrics
+        self.record_forkchoice_metrics(&attrs);
+
+        // Pre-validation of the forkchoice state
+        if let Some(early_result) = self.validate_forkchoice_state(state)? {
+            return Ok(TreeOutcome::new(early_result));
+        }
+
+        // Return early if we are already on the correct fork
+        if let Some(result) = self.handle_canonical_head(state, &attrs, version)? {
+            return Ok(result);
+        }
+
+        // Attempt to apply a chain update when the head differs from our canonical chain.
+        // This handles reorgs and chain extensions by making the specified head canonical.
+        if let Some(result) = self.apply_chain_update(state, &attrs, version)? {
+            return Ok(result);
+        }
+
+        // Fallback that ensures we catch up to the network's state.
+        self.handle_missing_block(state)
+    }
+
+    /// Records metrics for forkchoice updated calls.
+    fn record_forkchoice_metrics(&self, attrs: &Option<T::PayloadAttributes>) {
         self.metrics.engine.forkchoice_updated_messages.increment(1);
         if attrs.is_some() {
             self.metrics.engine.forkchoice_with_attributes_updated_messages.increment(1);
         }
         self.canonical_in_memory_state.on_forkchoice_update_received();
+    }
 
-        if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? {
-            return Ok(TreeOutcome::new(on_updated))
+    /// Pre-validates the forkchoice state and returns early if validation fails.
+    ///
+    /// Returns `Some(OnForkChoiceUpdated)` if validation fails and an early response should be
+    /// returned. Returns `None` if validation passes and processing should continue.
+    fn validate_forkchoice_state(
+        &mut self,
+        state: ForkchoiceState,
+    ) -> ProviderResult<Option<OnForkChoiceUpdated>> {
+        if state.head_block_hash.is_zero() {
+            return Ok(Some(OnForkChoiceUpdated::invalid_state()));
         }
 
-        let valid_outcome = |head| {
-            TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new(
-                PayloadStatusEnum::Valid,
-                Some(head),
-            )))
-        };
+        // Check if the new head hash is connected to any ancestor that we previously marked as
+        // invalid
+        let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash);
+        if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? {
+            return Ok(Some(OnForkChoiceUpdated::with_invalid(status)));
+        }
 
+        if !self.backfill_sync_state.is_idle() {
+            // We can only process new forkchoice updates if the pipeline is idle, since it
+            // requires exclusive access to the database
+            trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update");
+            return Ok(Some(OnForkChoiceUpdated::syncing()));
+        }
+
+        Ok(None)
+    }
+
+    /// Handles the case where the forkchoice head is already canonical.
+    ///
+    /// Returns `Some(TreeOutcome)` if the head is already canonical and
+    /// processing is complete. Returns `None` if the head is not canonical and processing
+    /// should continue.
+    fn handle_canonical_head(
+        &self,
+        state: ForkchoiceState,
+        attrs: &Option<T::PayloadAttributes>,
+        version: EngineApiMessageVersion,
+    ) -> ProviderResult<Option<TreeOutcome<OnForkChoiceUpdated>>> {
         // Process the forkchoice update by trying to make the head block canonical
         //
         // We can only process this forkchoice update if:
@@ -1046,34 +1102,58 @@ where
         // - emitting a canonicalization event for the new chain (including reorg)
         // - if we have payload attributes, delegate them to the payload service
 
-        // 1. ensure we have a new head block
-        if self.state.tree_state.canonical_block_hash() == state.head_block_hash {
-            trace!(target: "engine::tree", "fcu head hash is already canonical");
+        if self.state.tree_state.canonical_block_hash() != state.head_block_hash {
+            return Ok(None);
+        }
 
-            // update the safe and finalized blocks and ensure their values are valid
-            if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) {
-                // safe or finalized hashes are invalid
-                return Ok(TreeOutcome::new(outcome))
-            }
+        trace!(target: "engine::tree", "fcu head hash is already canonical");
 
-            // we still need to process payload attributes if the head is already canonical
-            if let Some(attr) = attrs {
-                let tip = self
-                    .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())?
-                    .ok_or_else(|| {
-                        // If we can't find the canonical block, then something is wrong and we need
-                        // to return an error
-                        ProviderError::HeaderNotFound(state.head_block_hash.into())
-                    })?;
-                let updated = self.process_payload_attributes(attr, &tip, state, version);
-                return Ok(TreeOutcome::new(updated))
-            }
+        // Update the safe and finalized blocks and ensure their values are valid
+        if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) {
+            // safe or finalized hashes are invalid
+            return Ok(Some(TreeOutcome::new(outcome)));
+        }
 
-            // the head block is already canonical
-            return Ok(valid_outcome(state.head_block_hash))
+        // Process payload attributes if the head is already canonical
+        if let Some(attr) = attrs {
+            let tip = self
+                .sealed_header_by_hash(self.state.tree_state.canonical_block_hash())?
+                .ok_or_else(|| {
+                    // If we can't find the canonical block, then something is wrong and we need
+                    // to return an error
+                    ProviderError::HeaderNotFound(state.head_block_hash.into())
+                })?;
+            // Clone only when we actually need to process the attributes
+            let updated = self.process_payload_attributes(attr.clone(), &tip, state, version);
+            return Ok(Some(TreeOutcome::new(updated)));
         }
 
-        // 2. check if the head is already part of the canonical chain
+        // The head block is already canonical
+        let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new(
+            PayloadStatusEnum::Valid,
+            Some(state.head_block_hash),
+        )));
+        Ok(Some(outcome))
+    }
+
+    /// Applies a chain update for the new head block and processes payload attributes.
+    ///
+    /// This method handles the case where the forkchoice head differs from our current canonical
+    /// head. It attempts to make the specified head block canonical by:
+    /// - Checking if the head is already part of the canonical chain
+    /// - Applying chain reorganizations (reorgs) if necessary
+    /// - Processing payload attributes if provided
+    /// - Returning the appropriate forkchoice update response
+    ///
+    /// Returns `Some(TreeOutcome)` if a chain update was successfully applied.
+    /// Returns `None` if no chain update was needed or possible.
+    fn apply_chain_update(
+        &mut self,
+        state: ForkchoiceState,
+        attrs: &Option<T::PayloadAttributes>,
+        version: EngineApiMessageVersion,
+    ) -> ProviderResult<Option<TreeOutcome<OnForkChoiceUpdated>>> {
+        // Check if the head is already part of the canonical chain
         if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) {
             debug!(target: "engine::tree", head = canonical_header.number(), "fcu head block is already canonical");
 
@@ -1084,9 +1164,14 @@ where
             {
                 if let Some(attr) = attrs {
                     debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head");
-                    let updated =
-                        self.process_payload_attributes(attr, &canonical_header, state, version);
-                    return Ok(TreeOutcome::new(updated))
+                    // Clone only when we actually need to process the attributes
+                    let updated = self.process_payload_attributes(
+                        attr.clone(),
+                        &canonical_header,
+                        state,
+                        version,
+                    );
+                    return Ok(Some(TreeOutcome::new(updated)));
                 }
 
                 // At this point, no alternative block has been triggered, so we need effectively
@@ -1095,52 +1180,75 @@ where
                 // canonical ancestor. This ensures that state providers and the
                 // transaction pool operate with the correct chain state after
                 // forkchoice update processing.
+
                 if self.config.unwind_canonical_header() {
                     self.update_latest_block_to_canonical_ancestor(&canonical_header)?;
                 }
             }
 
-            // 2. Client software MAY skip an update of the forkchoice state and MUST NOT begin a
-            //    payload build process if `forkchoiceState.headBlockHash` references a `VALID`
-            //    ancestor of the head of canonical chain, i.e. the ancestor passed payload
-            //    validation process and deemed `VALID`. In the case of such an event, client
-            //    software MUST return `{payloadStatus: {status: VALID, latestValidHash:
-            //    forkchoiceState.headBlockHash, validationError: null}, payloadId: null}`
+            // According to the Engine API specification, client software MAY skip an update of the
+            // forkchoice state and MUST NOT begin a payload build process if
+            // `forkchoiceState.headBlockHash` references a `VALID` ancestor of the head
+            // of canonical chain, i.e. the ancestor passed payload validation process
+            // and deemed `VALID`. In the case of such an event, client software MUST
+            // return `{payloadStatus: {status: VALID, latestValidHash:
+            // forkchoiceState.headBlockHash, validationError: null}, payloadId: null}`
+
+            // The head block is already canonical and we're not processing payload attributes,
+            // so we're not triggering a payload job and can return right away
-            // the head block is already canonical, so we're not triggering a payload job and can
-            // return right away
-            return Ok(valid_outcome(state.head_block_hash))
+            let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new(
+                PayloadStatusEnum::Valid,
+                Some(state.head_block_hash),
+            )));
+            return Ok(Some(outcome));
        }
 
-        // 3. ensure we can apply a new chain update for the head block
+        // Ensure we can apply a new chain update for the head block
         if let Some(chain_update) = self.on_new_head(state.head_block_hash)? {
             let tip = chain_update.tip().clone_sealed_header();
             self.on_canonical_chain_update(chain_update);
 
-            // update the safe and finalized blocks and ensure their values are valid
+            // Update the safe and finalized blocks and ensure their values are valid
             if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) {
                 // safe or finalized hashes are invalid
-                return Ok(TreeOutcome::new(outcome))
+                return Ok(Some(TreeOutcome::new(outcome)));
             }
 
             if let Some(attr) = attrs {
-                let updated = self.process_payload_attributes(attr, &tip, state, version);
-                return Ok(TreeOutcome::new(updated))
+                // Clone only when we actually need to process the attributes
+                let updated = self.process_payload_attributes(attr.clone(), &tip, state, version);
+                return Ok(Some(TreeOutcome::new(updated)));
             }
 
-            return Ok(valid_outcome(state.head_block_hash))
+            let outcome = TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new(
+                PayloadStatusEnum::Valid,
+                Some(state.head_block_hash),
+            )));
+            return Ok(Some(outcome));
         }
 
-        // 4. we don't have the block to perform the update
-        // we assume the FCU is valid and at least the head is missing,
+        Ok(None)
+    }
+
+    /// Handles the case where the head block is missing and needs to be downloaded.
+    ///
+    /// This is the fallback case when all other forkchoice update scenarios have been exhausted.
+    /// Returns a `TreeOutcome` with syncing status and a download event.
+    fn handle_missing_block(
+        &self,
+        state: ForkchoiceState,
+    ) -> ProviderResult<TreeOutcome<OnForkChoiceUpdated>> {
+        // We don't have the block to perform the forkchoice update
+        // We assume the FCU is valid and at least the head is missing,
         // so we need to start syncing to it
         //
         // find the appropriate target to sync to, if we don't have the safe block hash then we
         // start syncing to the safe block via backfill first
         let target = if self.state.forkchoice_state_tracker.is_empty() &&
-            // check that safe block is valid and missing
-            !state.safe_block_hash.is_zero() &&
-            self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none()
+            // check that safe block is valid and missing
+            !state.safe_block_hash.is_zero() &&
+            self.find_canonical_header(state.safe_block_hash).ok().flatten().is_none()
         {
             debug!(target: "engine::tree", "missing safe block on initial FCU, downloading safe block");
             state.safe_block_hash
@@ -1929,8 +2037,18 @@ where
     fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult<Option<PayloadStatus>> {
         // check if the head was previously marked as invalid
         let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) };
-        // populate the latest valid hash field
-        Ok(Some(self.prepare_invalid_response(header.parent)?))
+
+        // Try to prepare the invalid response, but handle errors gracefully
+        match self.prepare_invalid_response(header.parent) {
+            Ok(status) => Ok(Some(status)),
+            Err(err) => {
+                debug!(target: "engine::tree", %err, "Failed to prepare invalid response for ancestor check");
+                // Return a basic invalid status without the latest valid hash
+                Ok(Some(PayloadStatus::from_status(PayloadStatusEnum::Invalid {
+                    validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(),
+                })))
+            }
+        }
     }
 
     /// Validate if block is correct and satisfies all the consensus rules that concern the header
@@ -2687,7 +2805,9 @@ where
                     // we're also persisting the finalized block on disk so we can reload it on
                     // restart; this is required by optimism which queries the finalized block:
                    let _ = self.persistence.save_finalized_block_number(finalized.number());
-                    self.canonical_in_memory_state.set_finalized(finalized);
+                    self.canonical_in_memory_state.set_finalized(finalized.clone());
+                    // Update the finalized block height metric
+                    self.metrics.tree.finalized_block_height.set(finalized.number() as f64);
                 }
             }
             Err(err) => {
@@ -2715,7 +2835,9 @@ where
                     // we're also persisting the safe block on disk so we can reload it on
                     // restart; this is required by optimism which queries the safe block:
                     let _ = self.persistence.save_safe_block_number(safe.number());
-                    self.canonical_in_memory_state.set_safe(safe);
+                    self.canonical_in_memory_state.set_safe(safe.clone());
+                    // Update the safe block height metric
+                    self.metrics.tree.safe_block_height.set(safe.number() as f64);
                 }
             }
             Err(err) => {
@@ -2753,35 +2875,6 @@ where
         self.update_safe_block(state.safe_block_hash)
     }
 
-    /// Pre-validate forkchoice update and check whether it can be processed.
-    ///
-    /// This method returns the update outcome if validation fails or
-    /// the node is syncing and the update cannot be processed at the moment.
-    fn pre_validate_forkchoice_update(
-        &mut self,
-        state: ForkchoiceState,
-    ) -> ProviderResult<Option<OnForkChoiceUpdated>> {
-        if state.head_block_hash.is_zero() {
-            return Ok(Some(OnForkChoiceUpdated::invalid_state()))
-        }
-
-        // check if the new head hash is connected to any ancestor that we previously marked as
-        // invalid
-        let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash);
-        if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? {
-            return Ok(Some(OnForkChoiceUpdated::with_invalid(status)))
-        }
-
-        if !self.backfill_sync_state.is_idle() {
-            // We can only process new forkchoice updates if the pipeline is idle, since it requires
-            // exclusive access to the database
-            trace!(target: "engine::tree", "Pipeline is syncing, skipping forkchoice update");
-            return Ok(Some(OnForkChoiceUpdated::syncing()))
-        }
-
-        Ok(None)
-    }
-
     /// Validates the payload attributes with respect to the header and fork choice state.
     ///
     /// Note: At this point, the fork choice update is considered to be VALID, however, we can still
diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs
index 8d9bd1ba2e0..e3090d60756 100644
--- a/crates/engine/tree/src/tree/payload_processor/mod.rs
+++ b/crates/engine/tree/src/tree/payload_processor/mod.rs
@@ -32,7 +32,7 @@ use reth_provider::{
 use reth_revm::{db::BundleState, state::EvmState};
 use reth_trie::TrieInput;
 use reth_trie_parallel::{
-    proof_task::{ProofTaskCtx, ProofTaskManager},
+    proof_task::{ProofTaskCtx, ProofWorkerHandle},
     root::ParallelStateRootError,
 };
 use reth_trie_sparse::{
@@ -45,7 +45,7 @@ use std::sync::{
     mpsc::{self, channel, Sender},
     Arc,
 };
-use tracing::{debug, instrument};
+use tracing::{debug, instrument, warn};
 
 mod configured_sparse_trie;
 pub mod executor;
@@ -166,6 +166,7 @@ where
     ///
     /// This returns a handle to await the final state root and to interact with the tasks (e.g.
     /// canceling)
+    #[allow(clippy::type_complexity)]
    pub fn spawn<P, I: ExecutableTxIterator<Evm>>(
         &mut self,
         env: ExecutionEnv<Evm>,
         txs: I,
         provider_builder: StateProviderBuilder<N, P>,
         consistent_view: ConsistentDbView<P>,
         trie_input: TrieInput,
         config: &TreeConfig,
-    ) -> PayloadHandle<WithTxEnv<TxEnvFor<Evm>, I::Tx>, I::Error>
+    ) -> Result<
+        PayloadHandle<WithTxEnv<TxEnvFor<Evm>, I::Tx>, I::Error>,
+        (ParallelStateRootError, I, ExecutionEnv<Evm>, StateProviderBuilder<N, P>),
+    >
     where
         P: DatabaseProviderFactory<Provider: BlockReader>
            + BlockReader
            + ...
     {
         let (to_sparse_trie, sparse_trie_rx) = channel();
         // spawn multiproof task, save the trie input
-        let (trie_input, state_root_config) =
-            MultiProofConfig::new_from_input(consistent_view, trie_input);
+        let (trie_input, state_root_config) = MultiProofConfig::from_input(trie_input);
         self.trie_input = Some(trie_input);
 
         // Create and spawn the storage proof task
@@ -195,12 +198,15 @@ where
             state_root_config.state_sorted.clone(),
             state_root_config.prefix_sets.clone(),
         );
+        let storage_worker_count = config.storage_worker_count();
+        let account_worker_count = config.account_worker_count();
         let max_proof_task_concurrency = config.max_proof_task_concurrency() as usize;
-        let proof_task = ProofTaskManager::new(
+        let proof_handle = ProofWorkerHandle::new(
             self.executor.handle().clone(),
-            state_root_config.consistent_view.clone(),
+            consistent_view,
             task_ctx,
-            max_proof_task_concurrency,
+            storage_worker_count,
+            account_worker_count,
         );
 
         // We set it to half of the proof task concurrency, because often for each multiproof we
@@ -209,7 +215,7 @@ where
         let multi_proof_task = MultiProofTask::new(
             state_root_config,
             self.executor.clone(),
-            proof_task.handle(),
+            proof_handle.clone(),
             to_sparse_trie,
             max_multi_proof_task_concurrency,
             config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()),
@@ -238,26 +244,14 @@ where
         let (state_root_tx, state_root_rx) = channel();
 
         // Spawn the sparse trie task using any stored trie and parallel trie configuration.
-        self.spawn_sparse_trie_task(sparse_trie_rx, proof_task.handle(), state_root_tx);
-
-        // spawn the proof task
-        self.executor.spawn_blocking(move || {
-            if let Err(err) = proof_task.run() {
-                // At least log if there is an error at any point
-                tracing::error!(
-                    target: "engine::root",
-                    ?err,
-                    "Storage proof task returned an error"
-                );
-            }
-        });
+        self.spawn_sparse_trie_task(sparse_trie_rx, proof_handle, state_root_tx);
 
-        PayloadHandle {
+        Ok(PayloadHandle {
             to_multi_proof,
             prewarm_handle,
             state_root: Some(state_root_rx),
             transactions: execution_rx,
-        }
+        })
     }
 
     /// Spawns a task that exclusively handles cache prewarming for transaction execution.
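Note: `spawn` is now fallible. On failure it returns the error together with the transaction iterator, execution env, and provider builder, so the caller regains ownership of those values and can degrade gracefully. A minimal sketch of the intended call pattern, under the assumption of hypothetical caller code (the `spawn_cache_exclusive` fallback is the one used by the `payload_validator.rs` change later in this diff):

    // Sketch only: `payload_processor`, `env`, `txs`, etc. stand in for caller state.
    match payload_processor.spawn(env, txs, provider_builder, consistent_view, trie_input, &config) {
        Ok(handle) => handle,
        Err((err, txs, env, provider_builder)) => {
            // Proof workers could not be spawned; fall back to cache-only prewarming
            // without a state root task.
            warn!(?err, "failed to spawn proof workers, falling back");
            payload_processor.spawn_cache_exclusive(env, txs, provider_builder)
        }
    }
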
@@ -392,7 +386,7 @@ where
     fn spawn_sparse_trie_task<BPF>(
         &self,
         sparse_trie_rx: mpsc::Receiver<SparseTrieUpdate>,
-        proof_task_handle: BPF,
+        proof_worker_handle: BPF,
         state_root_tx: mpsc::Sender<Result<StateRootComputeOutcome, ParallelStateRootError>>,
     ) where
         BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static,
@@ -422,7 +416,7 @@ where
         let task =
             SparseTrieTask::<_, ConfiguredSparseTrie, ConfiguredSparseTrie>::new_with_cleared_trie(
                 sparse_trie_rx,
-                proof_task_handle,
+                proof_worker_handle,
                 self.trie_metrics.clone(),
                 sparse_state_trie,
             );
@@ -857,14 +851,20 @@ mod tests {
             PrecompileCacheMap::default(),
         );
         let provider = BlockchainProvider::new(factory).unwrap();
-        let mut handle = payload_processor.spawn(
-            Default::default(),
-            core::iter::empty::<Result<Recovered<TransactionSigned>, core::convert::Infallible>>(),
-            StateProviderBuilder::new(provider.clone(), genesis_hash, None),
-            ConsistentDbView::new_with_latest_tip(provider).unwrap(),
-            TrieInput::from_state(hashed_state),
-            &TreeConfig::default(),
-        );
+        let mut handle =
+            payload_processor
+                .spawn(
+                    Default::default(),
+                    core::iter::empty::<
+                        Result<Recovered<TransactionSigned>, core::convert::Infallible>,
+                    >(),
+                    StateProviderBuilder::new(provider.clone(), genesis_hash, None),
+                    ConsistentDbView::new_with_latest_tip(provider).unwrap(),
+                    TrieInput::from_state(hashed_state),
+                    &TreeConfig::default(),
+                )
+                .map_err(|(err, ..)| err)
+                .expect("failed to spawn payload processor");
 
         let mut state_hook = handle.state_hook();
diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs
index 6c7f5de40a3..a528b759570 100644
--- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs
+++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs
@@ -12,14 +12,17 @@ use derive_more::derive::Deref;
 use metrics::Histogram;
 use reth_errors::ProviderError;
 use reth_metrics::Metrics;
-use reth_provider::{providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, FactoryTx};
 use reth_revm::state::EvmState;
 use reth_trie::{
     added_removed_keys::MultiAddedRemovedKeys, prefix_set::TriePrefixSetsMut,
     updates::TrieUpdatesSorted, DecodedMultiProof, HashedPostState, HashedPostStateSorted,
     HashedStorage, MultiProofTargets, TrieInput,
 };
-use reth_trie_parallel::{proof::ParallelProof, proof_task::ProofTaskManagerHandle};
+use reth_trie_parallel::{
+    proof::ParallelProof,
+    proof_task::{AccountMultiproofInput, ProofWorkerHandle},
+    root::ParallelStateRootError,
+};
 use std::{
     collections::{BTreeMap, VecDeque},
     ops::DerefMut,
@@ -62,9 +65,7 @@ impl SparseTrieUpdate {
 
 /// Common configuration for multi proof tasks
 #[derive(Debug, Clone)]
-pub(super) struct MultiProofConfig<Factory> {
-    /// View over the state in the database.
-    pub consistent_view: ConsistentDbView<Factory>,
+pub(super) struct MultiProofConfig {
     /// The sorted collection of cached in-memory intermediate trie nodes that
     /// can be reused for computation.
     pub nodes_sorted: Arc<TrieUpdatesSorted>,
@@ -76,17 +77,13 @@ pub(super) struct MultiProofConfig {
     pub prefix_sets: Arc<TriePrefixSetsMut>,
 }
 
-impl<Factory> MultiProofConfig<Factory> {
-    /// Creates a new state root config from the consistent view and the trie input.
+impl MultiProofConfig {
+    /// Creates a new state root config from the trie input.
     ///
     /// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the
     /// [`TrieInput`].
-    pub(super) fn new_from_input(
-        consistent_view: ConsistentDbView<Factory>,
-        mut input: TrieInput,
-    ) -> (TrieInput, Self) {
+    pub(super) fn from_input(mut input: TrieInput) -> (TrieInput, Self) {
         let config = Self {
-            consistent_view,
             nodes_sorted: Arc::new(input.nodes.drain_into_sorted()),
             state_sorted: Arc::new(input.state.drain_into_sorted()),
             prefix_sets: Arc::new(input.prefix_sets.clone()),
@@ -245,14 +242,14 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat
 
 /// A pending multiproof task, either [`StorageMultiproofInput`] or [`MultiproofInput`].
 #[derive(Debug)]
-enum PendingMultiproofTask<Factory> {
+enum PendingMultiproofTask {
     /// A storage multiproof task input.
-    Storage(StorageMultiproofInput<Factory>),
+    Storage(StorageMultiproofInput),
     /// A regular multiproof task input.
-    Regular(MultiproofInput<Factory>),
+    Regular(MultiproofInput),
 }
 
-impl<Factory> PendingMultiproofTask<Factory> {
+impl PendingMultiproofTask {
     /// Returns the proof sequence number of the task.
     const fn proof_sequence_number(&self) -> u64 {
         match self {
@@ -278,22 +275,22 @@ impl PendingMultiproofTask {
     }
 }
 
-impl<Factory> From<StorageMultiproofInput<Factory>> for PendingMultiproofTask<Factory> {
-    fn from(input: StorageMultiproofInput<Factory>) -> Self {
+impl From<StorageMultiproofInput> for PendingMultiproofTask {
+    fn from(input: StorageMultiproofInput) -> Self {
         Self::Storage(input)
     }
 }
 
-impl<Factory> From<MultiproofInput<Factory>> for PendingMultiproofTask<Factory> {
-    fn from(input: MultiproofInput<Factory>) -> Self {
+impl From<MultiproofInput> for PendingMultiproofTask {
+    fn from(input: MultiproofInput) -> Self {
         Self::Regular(input)
     }
 }
 
 /// Input parameters for spawning a dedicated storage multiproof calculation.
 #[derive(Debug)]
-struct StorageMultiproofInput<Factory> {
-    config: MultiProofConfig<Factory>,
+struct StorageMultiproofInput {
+    config: MultiProofConfig,
     source: Option<StateChangeSource>,
     hashed_state_update: HashedPostState,
     hashed_address: B256,
@@ -303,7 +300,7 @@ struct StorageMultiproofInput {
     multi_added_removed_keys: Arc<MultiAddedRemovedKeys>,
 }
 
-impl<Factory> StorageMultiproofInput<Factory> {
+impl StorageMultiproofInput {
     /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender.
     fn send_empty_proof(self) {
         let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof {
@@ -315,8 +312,8 @@ impl StorageMultiproofInput {
 
 /// Input parameters for spawning a multiproof calculation.
 #[derive(Debug)]
-struct MultiproofInput<Factory> {
-    config: MultiProofConfig<Factory>,
+struct MultiproofInput {
+    config: MultiProofConfig,
     source: Option<StateChangeSource>,
     hashed_state_update: HashedPostState,
     proof_targets: MultiProofTargets,
@@ -325,7 +322,7 @@ struct MultiproofInput {
     multi_added_removed_keys: Option<Arc<MultiAddedRemovedKeys>>,
 }
 
-impl<Factory> MultiproofInput<Factory> {
+impl MultiproofInput {
     /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender.
     fn send_empty_proof(self) {
         let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof {
@@ -340,17 +337,17 @@ impl MultiproofInput {
 /// concurrency, further calculation requests are queued and spawn later, after
 /// availability has been signaled.
 #[derive(Debug)]
-pub struct MultiproofManager<Factory> {
+pub struct MultiproofManager {
     /// Maximum number of concurrent calculations.
     max_concurrent: usize,
     /// Currently running calculations.
     inflight: usize,
     /// Queued calculations.
-    pending: VecDeque<PendingMultiproofTask<Factory>>,
+    pending: VecDeque<PendingMultiproofTask>,
     /// Executor for tasks
     executor: WorkloadExecutor,
-    /// Sender to the storage proof task.
-    storage_proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>,
+    /// Handle to the proof worker pools (storage and account).
+    proof_worker_handle: ProofWorkerHandle,
     /// Cached storage proof roots for missed leaves; this maps
     /// hashed (missed) addresses to their storage proof roots.
     ///
@@ -367,15 +364,12 @@ pub struct MultiproofManager {
     metrics: MultiProofTaskMetrics,
 }
 
-impl<Factory> MultiproofManager<Factory>
-where
-    Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static,
-{
+impl MultiproofManager {
     /// Creates a new [`MultiproofManager`].
     fn new(
         executor: WorkloadExecutor,
         metrics: MultiProofTaskMetrics,
-        storage_proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>,
+        proof_worker_handle: ProofWorkerHandle,
         max_concurrent: usize,
     ) -> Self {
         Self {
@@ -384,7 +378,7 @@ where
             executor,
             inflight: 0,
             metrics,
-            storage_proof_task_handle,
+            proof_worker_handle,
             missed_leaves_storage_roots: Default::default(),
         }
     }
@@ -395,7 +389,7 @@ impl MultiproofManager {
 
     /// Spawns a new multiproof calculation or enqueues it for later if
     /// `max_concurrent` are already inflight.
-    fn spawn_or_queue(&mut self, input: PendingMultiproofTask<Factory>) {
+    fn spawn_or_queue(&mut self, input: PendingMultiproofTask) {
         // If there are no proof targets, we can just send an empty multiproof back immediately
         if input.proof_targets_is_empty() {
             debug!(
@@ -429,7 +423,7 @@ impl MultiproofManager {
 
     /// Spawns a multiproof task, dispatching to `spawn_storage_proof` if the input is a storage
     /// multiproof, and dispatching to `spawn_multiproof` otherwise.
-    fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask<Factory>) {
+    fn spawn_multiproof_task(&mut self, input: PendingMultiproofTask) {
         match input {
             PendingMultiproofTask::Storage(storage_input) => {
                 self.spawn_storage_proof(storage_input);
@@ -441,7 +435,7 @@ impl MultiproofManager {
     }
 
     /// Spawns a single storage proof calculation task.
-    fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput<Factory>) {
+    fn spawn_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) {
         let StorageMultiproofInput {
             config,
             source,
@@ -453,7 +447,7 @@ impl MultiproofManager {
             multi_added_removed_keys,
         } = storage_multiproof_input;
 
-        let storage_proof_task_handle = self.storage_proof_task_handle.clone();
+        let storage_proof_worker_handle = self.proof_worker_handle.clone();
         let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone();
 
         self.executor.spawn_blocking(move || {
@@ -468,12 +462,11 @@ impl MultiproofManager {
             );
             let start = Instant::now();
             let proof_result = ParallelProof::new(
-                config.consistent_view,
                 config.nodes_sorted,
                 config.state_sorted,
                 config.prefix_sets,
                 missed_leaves_storage_roots,
-                storage_proof_task_handle.clone(),
+                storage_proof_worker_handle,
             )
             .with_branch_node_masks(true)
            .with_multi_added_removed_keys(Some(multi_added_removed_keys))
@@ -516,7 +509,7 @@ impl MultiproofManager {
     }
 
     /// Spawns a single multiproof calculation task.
-    fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput<Factory>) {
+    fn spawn_multiproof(&mut self, multiproof_input: MultiproofInput) {
         let MultiproofInput {
             config,
             source,
@@ -526,7 +519,7 @@ impl MultiproofManager {
             state_root_message_sender,
             multi_added_removed_keys,
         } = multiproof_input;
-        let storage_proof_task_handle = self.storage_proof_task_handle.clone();
+        let account_proof_worker_handle = self.proof_worker_handle.clone();
         let missed_leaves_storage_roots = self.missed_leaves_storage_roots.clone();
 
         self.executor.spawn_blocking(move || {
@@ -544,17 +537,32 @@ impl MultiproofManager {
             );
             let start = Instant::now();
-            let proof_result = ParallelProof::new(
-                config.consistent_view,
-                config.nodes_sorted,
-                config.state_sorted,
-                config.prefix_sets,
+
+            // Extend prefix sets with targets
+            let frozen_prefix_sets =
+                ParallelProof::extend_prefix_sets_with_targets(&config.prefix_sets, &proof_targets);
+
+            // Queue the account multiproof onto the worker pool
+            let input = AccountMultiproofInput {
+                targets: proof_targets,
+                prefix_sets: frozen_prefix_sets,
+                collect_branch_node_masks: true,
+                multi_added_removed_keys,
                 missed_leaves_storage_roots,
-                storage_proof_task_handle.clone(),
-            )
-            .with_branch_node_masks(true)
-            .with_multi_added_removed_keys(multi_added_removed_keys)
-            .decoded_multiproof(proof_targets);
+            };
+
+            let proof_result: Result<DecodedMultiProof, ParallelStateRootError> = (|| {
+                let receiver = account_proof_worker_handle
+                    .dispatch_account_multiproof(input)
+                    .map_err(|e| ParallelStateRootError::Other(e.to_string()))?;
+
+                receiver
+                    .recv()
+                    .map_err(|_| {
+                        ParallelStateRootError::Other("Account multiproof channel closed".into())
+                    })?
+                    .map(|(proof, _stats)| proof)
+            })();
 
             let elapsed = start.elapsed();
             trace!(
                 target: "engine::root",
@@ -645,13 +653,13 @@ pub(crate) struct MultiProofTaskMetrics {
 /// Then it updates relevant leaves according to the result of the transaction.
 /// This feeds updates to the sparse trie task.
 #[derive(Debug)]
-pub(super) struct MultiProofTask<Factory> {
+pub(super) struct MultiProofTask {
     /// The size of proof targets chunk to spawn in one calculation.
     ///
     /// If [`None`], then chunking is disabled.
     chunk_size: Option<usize>,
     /// Task configuration.
-    config: MultiProofConfig<Factory>,
+    config: MultiProofConfig,
     /// Receiver for state root related messages.
     rx: Receiver<MultiProofMessage>,
     /// Sender for state root related messages.
@@ -665,20 +673,17 @@ pub(super) struct MultiProofTask {
     /// Proof sequencing handler.
     proof_sequencer: ProofSequencer,
     /// Manages calculation of multiproofs.
-    multiproof_manager: MultiproofManager<Factory>,
+    multiproof_manager: MultiproofManager,
     /// multi proof task metrics
     metrics: MultiProofTaskMetrics,
 }
 
-impl<Factory> MultiProofTask<Factory>
-where
-    Factory: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static,
-{
+impl MultiProofTask {
     /// Creates a new multi proof task with the unified message channel
     pub(super) fn new(
-        config: MultiProofConfig<Factory>,
+        config: MultiProofConfig,
         executor: WorkloadExecutor,
-        proof_task_handle: ProofTaskManagerHandle<FactoryTx<Factory>>,
+        proof_worker_handle: ProofWorkerHandle,
         to_sparse_trie: Sender<SparseTrieUpdate>,
         max_concurrency: usize,
         chunk_size: Option<usize>,
@@ -698,7 +703,7 @@ impl MultiProofTask {
             multiproof_manager: MultiproofManager::new(
                 executor,
                 metrics.clone(),
-                proof_task_handle,
+                proof_worker_handle,
                 max_concurrency,
             ),
             metrics,
@@ -1202,44 +1207,31 @@ fn get_proof_targets(
 mod tests {
     use super::*;
     use alloy_primitives::map::B256Set;
-    use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory};
+    use reth_provider::{
+        providers::ConsistentDbView, test_utils::create_test_provider_factory, BlockReader,
+        DatabaseProviderFactory,
+    };
     use reth_trie::{MultiProof, TrieInput};
-    use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofTaskManager};
+    use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle};
     use revm_primitives::{B256, U256};
-    use std::sync::Arc;
-
-    fn create_state_root_config<F>(factory: F, input: TrieInput) -> MultiProofConfig<F>
-    where
-        F: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static,
-    {
-        let consistent_view = ConsistentDbView::new(factory, None);
-        let nodes_sorted = Arc::new(input.nodes.clone().into_sorted());
-        let state_sorted = Arc::new(input.state.clone().into_sorted());
-        let prefix_sets = Arc::new(input.prefix_sets);
-
-        MultiProofConfig { consistent_view, nodes_sorted, state_sorted, prefix_sets }
-    }
 
-    fn create_test_state_root_task<F>(factory: F) -> MultiProofTask<F>
+    fn create_test_state_root_task<F>(factory: F) -> MultiProofTask
     where
         F: DatabaseProviderFactory<Provider: BlockReader> + Clone + 'static,
     {
         let executor = WorkloadExecutor::default();
-        let config = create_state_root_config(factory, TrieInput::default());
+        let (_trie_input, config) = MultiProofConfig::from_input(TrieInput::default());
         let task_ctx = ProofTaskCtx::new(
             config.nodes_sorted.clone(),
             config.state_sorted.clone(),
             config.prefix_sets.clone(),
         );
-        let proof_task = ProofTaskManager::new(
-            executor.handle().clone(),
-            config.consistent_view.clone(),
-            task_ctx,
-            1,
-        );
+        let consistent_view = ConsistentDbView::new(factory, None);
+        let proof_handle =
+            ProofWorkerHandle::new(executor.handle().clone(), consistent_view, task_ctx, 1, 1);
         let channel = channel();
 
-        MultiProofTask::new(config, executor, proof_task.handle(), channel.0, 1, None)
+        MultiProofTask::new(config, executor, proof_handle, channel.0, 1, None)
     }
 
     #[test]
diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs
index e2c41b0ceba..f3525d8e9c7 100644
--- a/crates/engine/tree/src/tree/payload_validator.rs
+++ b/crates/engine/tree/src/tree/payload_validator.rs
@@ -43,6 +43,7 @@ use reth_revm::db::State;
 use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, TrieInput};
 use reth_trie_db::DatabaseHashedPostState;
 use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError};
+use revm::context::Block;
 use std::{collections::HashMap, sync::Arc, time::Instant};
 use tracing::{debug, debug_span, error, info, trace, warn};
 
@@ -492,13 +493,15 @@ where
                 ctx.state(),
             ) {
                 Ok(result) => {
+                    let elapsed = root_time.elapsed();
                     info!(
target: "engine::tree", block = ?block_num_hash, regular_state_root = ?result.0, + ?elapsed, "Regular root task finished" ); - maybe_state_root = Some((result.0, result.1, root_time.elapsed())); + maybe_state_root = Some((result.0, result.1, elapsed)); } Err(error) => { debug!(target: "engine::tree", %error, "Parallel state root computation failed"); @@ -640,7 +643,7 @@ where T: PayloadTypes>, Evm: ConfigureEngineEvm, { - let num_hash = NumHash::new(env.evm_env.block_env.number.to(), env.hash); + let num_hash = NumHash::new(env.evm_env.block_env.number().to(), env.hash); let span = debug_span!(target: "engine::tree", "execute_block", num = ?num_hash.number, hash = ?num_hash.hash); let _enter = span.enter(); @@ -877,17 +880,36 @@ where // too expensive because it requires walking all paths in every proof. let spawn_start = Instant::now(); let (handle, strategy) = if trie_input.prefix_sets.is_empty() { - ( - self.payload_processor.spawn( - env, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ), - StateRootStrategy::StateRootTask, - ) + match self.payload_processor.spawn( + env, + txs, + provider_builder, + consistent_view, + trie_input, + &self.config, + ) { + Ok(handle) => { + // Successfully spawned with state root task support + (handle, StateRootStrategy::StateRootTask) + } + Err((error, txs, env, provider_builder)) => { + // Failed to spawn proof workers, fallback to parallel state root + error!( + target: "engine::tree", + block=?block_num_hash, + ?error, + "Failed to spawn proof workers, falling back to parallel state root" + ); + ( + self.payload_processor.spawn_cache_exclusive( + env, + txs, + provider_builder, + ), + StateRootStrategy::Parallel, + ) + } + } // if prefix sets are not empty, we spawn a task that exclusively handles cache // prewarming for transaction execution } else { diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index b2774b8b17e..17b5950e077 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -56,6 +56,7 @@ impl reth_engine_primitives::PayloadValidator for MockEngineVali reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into()) })?; let sealed = block.seal_slow(); + sealed.try_recover().map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into())) } } @@ -1705,3 +1706,305 @@ mod payload_execution_tests { } } } + +/// Test suite for the refactored `on_forkchoice_updated` helper methods +#[cfg(test)] +mod forkchoice_updated_tests { + use super::*; + use alloy_primitives::Address; + + /// Test that validates the forkchoice state pre-validation logic + #[tokio::test] + async fn test_validate_forkchoice_state() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Test 1: Zero head block hash should return early with invalid state + let zero_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(zero_state).unwrap(); + assert!(result.is_some(), "Zero head block hash should return early"); + let outcome = result.unwrap(); + // For invalid state, we expect an error response + assert!(matches!(outcome, OnForkChoiceUpdated { .. 
})); + + // Test 2: Valid state with backfill active should return syncing + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_some(), "Backfill active should return early"); + let outcome = result.unwrap(); + // We need to await the outcome to check the payload status + let fcu_result = outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + + // Test 3: Valid state with idle backfill should continue processing + test_harness.tree.backfill_sync_state = BackfillSyncState::Idle; + let valid_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.validate_forkchoice_state(valid_state).unwrap(); + assert!(result.is_none(), "Valid state should continue processing"); + } + + /// Test that verifies canonical head handling + #[tokio::test] + async fn test_handle_canonical_head() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test 1: Head is already canonical, no payload attributes + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should return outcome for canonical head"); + let outcome = result.unwrap(); + let fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Head is not canonical - should return None to continue processing + let non_canonical_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(non_canonical_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Non-canonical head should return None"); + } + + /// Test that verifies chain update application + #[tokio::test] + async fn test_apply_chain_update() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create a chain of blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..5).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let new_head = blocks[2].recovered_block().hash(); + + // Test 1: Apply chain update to a new head + let state = ForkchoiceState { + head_block_hash: new_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "Should apply chain update for new head"); + let outcome = result.unwrap(); + let fcu_result = outcome.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test 2: Try to apply chain update to missing block + let missing_state = ForkchoiceState { + 
head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .apply_chain_update(missing_state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_none(), "Missing block should return None"); + } + + /// Test that verifies missing block handling + #[tokio::test] + async fn test_handle_missing_block() { + let chain_spec = MAINNET.clone(); + let test_harness = TestHarness::new(chain_spec); + + let state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness.tree.handle_missing_block(state).unwrap(); + + // Should return syncing status with download event + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some()); + + if let Some(TreeEvent::Download(download_request)) = result.event { + match download_request { + DownloadRequest::BlockSet(block_set) => { + assert_eq!(block_set.len(), 1); + } + _ => panic!("Expected single block download request"), + } + } + } + + /// Test the complete `on_forkchoice_updated` flow with all helper methods + #[tokio::test] + async fn test_on_forkchoice_updated_integration() { + reth_tracing::init_test_tracing(); + + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks.clone()); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // Test Case 1: FCU to existing canonical head + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: canonical_head, + finalized_block_hash: canonical_head, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_valid()); + + // Test Case 2: FCU to missing block + let missing_state = ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(missing_state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing()); + assert!(result.event.is_some(), "Should trigger download event for missing block"); + + // Test Case 3: FCU during backfill sync + test_harness.tree.backfill_sync_state = BackfillSyncState::Active; + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .on_forkchoice_updated(state, None, EngineApiMessageVersion::default()) + .unwrap(); + let fcu_result = result.outcome.await.unwrap(); + assert!(fcu_result.payload_status.is_syncing(), "Should return syncing during backfill"); + } + + /// Test metrics recording in forkchoice updated + #[tokio::test] + async fn test_record_forkchoice_metrics() { + let chain_spec = MAINNET.clone(); + let test_harness = TestHarness::new(chain_spec); + + // Get initial metrics state by checking if metrics are recorded + // We can't directly get counter values, but we can verify the methods are called + + // Test without attributes + let attrs_none = 
None; + test_harness.tree.record_forkchoice_metrics(&attrs_none); + + // Test with attributes + let attrs_some = Some(alloy_rpc_types_engine::PayloadAttributes { + timestamp: 1000, + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: None, + parent_beacon_block_root: None, + }); + test_harness.tree.record_forkchoice_metrics(&attrs_some); + + // We can't directly verify counter values since they're private metrics + // But we can verify the methods don't panic and execute successfully + } + + /// Test edge case: FCU with invalid ancestor + #[tokio::test] + async fn test_fcu_with_invalid_ancestor() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Mark a block as invalid + let invalid_block_hash = B256::random(); + test_harness.tree.state.invalid_headers.insert(BlockWithParent { + block: NumHash::new(1, invalid_block_hash), + parent: B256::ZERO, + }); + + // Test FCU that points to a descendant of the invalid block + // This is a bit tricky to test directly, but we can verify the check_invalid_ancestor + // method + let result = test_harness.tree.check_invalid_ancestor(invalid_block_hash).unwrap(); + assert!(result.is_some(), "Should detect invalid ancestor"); + } + + /// Test `OpStack` specific behavior with canonical head + #[tokio::test] + async fn test_opstack_canonical_head_behavior() { + let chain_spec = MAINNET.clone(); + let mut test_harness = TestHarness::new(chain_spec); + + // Set engine kind to OpStack + test_harness.tree.engine_kind = EngineApiKind::OpStack; + + // Create test blocks + let blocks: Vec<_> = test_harness.block_builder.get_executed_blocks(0..3).collect(); + test_harness = test_harness.with_blocks(blocks); + + let canonical_head = test_harness.tree.state.tree_state.canonical_block_hash(); + + // For OpStack, even if head is already canonical, we should still process payload + // attributes + let state = ForkchoiceState { + head_block_hash: canonical_head, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, + }; + + let result = test_harness + .tree + .handle_canonical_head(state, &None, EngineApiMessageVersion::default()) + .unwrap(); + assert!(result.is_some(), "OpStack should handle canonical head"); + } +} diff --git a/crates/era-downloader/src/client.rs b/crates/era-downloader/src/client.rs index 298248ff3e9..36ed93e1e2f 100644 --- a/crates/era-downloader/src/client.rs +++ b/crates/era-downloader/src/client.rs @@ -128,8 +128,6 @@ impl EraClient { let Some(number) = self.file_name_to_number(name) && (number < index || number >= last) { - eprintln!("Deleting file {}", entry.path().display()); - eprintln!("{number} < {index} || {number} >= {last}"); reth_fs_util::remove_file(entry.path())?; } } diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 12bafed6113..b1c3cd309c0 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -286,12 +286,12 @@ where { let mut last_header_number = match block_numbers.start_bound() { Bound::Included(&number) => number, - Bound::Excluded(&number) => number.saturating_sub(1), + Bound::Excluded(&number) => number.saturating_add(1), Bound::Unbounded => 0, }; let target = match block_numbers.end_bound() { Bound::Included(&number) => Some(number), - Bound::Excluded(&number) => Some(number.saturating_add(1)), + Bound::Excluded(&number) => Some(number.saturating_sub(1)), Bound::Unbounded => None, }; diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml 
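
Note on the `era-utils` hunk just above: the fix swaps the inverted `Bound::Excluded` arithmetic, so an excluded start bound begins one block later and an excluded end bound stops one block earlier. A self-contained sketch of the corrected conversion; the variable names mirror the hunk, while `to_inclusive` itself is only illustrative:

```rust
use std::ops::{Bound, RangeBounds};

/// Convert generic range bounds into an inclusive (start, Option<end>) pair.
fn to_inclusive(block_numbers: impl RangeBounds<u64>) -> (u64, Option<u64>) {
    let last_header_number = match block_numbers.start_bound() {
        Bound::Included(&number) => number,
        // An excluded start means the range begins one block later.
        Bound::Excluded(&number) => number.saturating_add(1),
        Bound::Unbounded => 0,
    };
    let target = match block_numbers.end_bound() {
        Bound::Included(&number) => Some(number),
        // An excluded end means the range stops one block earlier.
        Bound::Excluded(&number) => Some(number.saturating_sub(1)),
        Bound::Unbounded => None,
    };
    (last_header_number, target)
}

fn main() {
    // 10..20 excludes 20, so the inclusive end is 19.
    assert_eq!(to_inclusive(10..20), (10, Some(19)));
    // 10..=20 includes both endpoints.
    assert_eq!(to_inclusive(10..=20), (10, Some(20)));
    // An unbounded end has no target.
    assert_eq!(to_inclusive(5..), (5, None));
}
```
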
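And on the `payload_validator` hunk further up: the fallback works because the failed spawn hands ownership of its inputs back inside the `Err` variant, so they can be reused for the cheaper parallel strategy instead of being cloned up front. A minimal sketch of that ownership-returning pattern; `Handle`, `SpawnError`, and `Inputs` are hypothetical stand-ins:

```rust
/// Hypothetical stand-ins for the real handle, error, and input types.
struct Handle;
#[derive(Debug)]
struct SpawnError;
struct Inputs {
    txs: Vec<u8>,
}

/// On failure, return the moved inputs to the caller instead of dropping them.
fn spawn_full(inputs: Inputs) -> Result<Handle, (SpawnError, Inputs)> {
    // Pretend worker spawning failed; hand the inputs back for reuse.
    Err((SpawnError, inputs))
}

/// Cheaper fallback that consumes the same inputs.
fn spawn_fallback(inputs: Inputs) -> Handle {
    let _ = inputs.txs;
    Handle
}

fn spawn_with_fallback(inputs: Inputs) -> Handle {
    match spawn_full(inputs) {
        Ok(handle) => handle,
        Err((error, inputs)) => {
            eprintln!("spawn failed: {error:?}, falling back");
            spawn_fallback(inputs)
        }
    }
}

fn main() {
    let _handle = spawn_with_fallback(Inputs { txs: vec![1, 2, 3] });
}
```
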
index 01a7751e77b..e232ea0cdb1 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -35,7 +35,9 @@ tracing.workspace = true tempfile.workspace = true [features] -default = ["jemalloc"] +default = ["jemalloc", "otlp"] + +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] dev = ["reth-cli-commands/arbitrary"] diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index e99dae2ac77..805c9144257 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -111,7 +111,18 @@ where /// If file logging is enabled, this function stores guard to the struct. pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_level, + )?; + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); } diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 1ebc40c7f8e..bb687df684a 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -18,7 +18,10 @@ use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator}; use reth_tracing::FileWorkerGuard; @@ -43,6 +46,10 @@ pub struct Cli< #[command(flatten)] pub logs: LogArgs, + /// The tracing configuration for the CLI. + #[command(flatten)] + pub traces: TraceArgs, + /// Type marker for the RPC module validator #[arg(skip)] pub _phantom: PhantomData, @@ -212,8 +219,11 @@ impl /// /// If file logging is enabled, this function returns a guard that must be kept alive to ensure /// that all logs are flushed to disk. + /// If an OTLP endpoint is specified, it will export metrics to the configured collector. pub fn init_tracing(&self) -> eyre::Result> { - let guard = self.logs.init_tracing()?; + let layers = reth_tracing::Layers::new(); + + let guard = self.logs.init_tracing_with_layers(layers)?; Ok(guard) } } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index a93e3312525..00cf303e25d 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -196,6 +196,7 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_consensus_common::validation::validate_against_parent_gas_limit; @@ -215,7 +216,7 @@ mod tests { let child = header_with_gas_limit((parent.gas_limit + 5) as u64); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::
<Header>
::default()), Ok(()) ); } @@ -226,7 +227,7 @@ mod tests { let child = header_with_gas_limit(MINIMUM_GAS_LIMIT - 1); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: child.gas_limit as u64 }) ); } @@ -239,7 +240,7 @@ ); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Err(ConsensusError::GasLimitInvalidIncrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit, @@ -253,7 +254,7 @@ let child = header_with_gas_limit(parent.gas_limit - 5); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>::default()), Ok(()) ); } @@ -266,7 +267,7 @@ ); assert_eq!( - validate_against_parent_gas_limit(&child, &parent, &ChainSpec::default()), + validate_against_parent_gas_limit(&child, &parent, &ChainSpec::<Header>
::default()), Err(ConsensusError::GasLimitInvalidDecrease { parent_gas_limit: parent.gas_limit, child_gas_limit: child.gas_limit, diff --git a/crates/ethereum/evm/src/build.rs b/crates/ethereum/evm/src/build.rs index 5f5e014d297..85d4cae311b 100644 --- a/crates/ethereum/evm/src/build.rs +++ b/crates/ethereum/evm/src/build.rs @@ -1,7 +1,7 @@ use alloc::{sync::Arc, vec::Vec}; use alloy_consensus::{ proofs::{self, calculate_receipt_root}, - Block, BlockBody, BlockHeader, Header, Transaction, TxReceipt, EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, BlockHeader, Header, TxReceipt, EMPTY_OMMER_ROOT_HASH, }; use alloy_eips::merge::BEACON_NONCE; use alloy_evm::{block::BlockExecutorFactory, eth::EthBlockExecutionCtx}; @@ -10,6 +10,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::execute::{BlockAssembler, BlockAssemblerInput, BlockExecutionError}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{logs_bloom, Receipt, SignedTransaction}; +use revm::context::Block as _; /// Block builder for Ethereum. #[derive(Debug, Clone)] @@ -47,12 +48,12 @@ where execution_ctx: ctx, parent, transactions, - output: BlockExecutionResult { receipts, requests, gas_used }, + output: BlockExecutionResult { receipts, requests, gas_used, blob_gas_used }, state_root, .. } = input; - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root( @@ -73,12 +74,11 @@ where .then(|| requests.requests_hash()); let mut excess_blob_gas = None; - let mut blob_gas_used = None; + let mut block_blob_gas_used = None; // only determine cancun fields when active if self.chain_spec.is_cancun_active_at_timestamp(timestamp) { - blob_gas_used = - Some(transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum()); + block_blob_gas_used = Some(*blob_gas_used); excess_blob_gas = if self.chain_spec.is_cancun_active_at_timestamp(parent.timestamp) { parent.maybe_next_block_excess_blob_gas( self.chain_spec.blob_params_at_timestamp(timestamp), @@ -96,23 +96,23 @@ where let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: self.extra_data.clone(), parent_beacon_block_root: ctx.parent_beacon_block_root, - blob_gas_used, + blob_gas_used: block_blob_gas_used, excess_blob_gas, requests_hash, }; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index eaf91f0c7be..c0f8adc9c54 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -132,6 +132,7 @@ where + FromRecoveredTx + FromTxWithEncoded, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug @@ 
-154,7 +155,7 @@ where &self.block_assembler } - fn evm_env(&self, header: &Header) -> Result { + fn evm_env(&self, header: &Header) -> Result, Self::Error> { Ok(EvmEnv::for_eth_block( header, self.chain_spec(), @@ -217,6 +218,7 @@ where + FromRecoveredTx + FromTxWithEncoded, Spec = SpecId, + BlockEnv = BlockEnv, Precompiles = PrecompilesMap, > + Clone + Debug diff --git a/crates/ethereum/evm/src/test_utils.rs b/crates/ethereum/evm/src/test_utils.rs index 87875dbc848..fe791b9f5fd 100644 --- a/crates/ethereum/evm/src/test_utils.rs +++ b/crates/ethereum/evm/src/test_utils.rs @@ -125,6 +125,7 @@ impl<'a, DB: Database, I: Inspector>>> BlockExec reqs }), gas_used: 0, + blob_gas_used: 0, }; evm.db_mut().bundle_state = bundle; diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 4d77b5e26ec..881539923ae 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -15,7 +15,6 @@ use reth_ethereum_engine_primitives::{ use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm::{ eth::spec::EthExecutorSpec, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes, - SpecFor, TxEnvFor, }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ @@ -159,10 +158,9 @@ where NetworkT: RpcTypes>>, EthRpcConverterFor: RpcConvert< Primitives = PrimitivesTy, - TxEnv = TxEnvFor, Error = EthApiError, Network = NetworkT, - Spec = SpecFor, + Evm = N::Evm, >, EthApiError: FromEvmError, { diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 8c969c9d44c..7f40e983bc8 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -176,8 +176,8 @@ where debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = builder.evm_mut().block().gas_limit; - let base_fee = builder.evm_mut().block().basefee; + let block_gas_limit: u64 = builder.evm_mut().block().gas_limit(); + let base_fee = builder.evm_mut().block().basefee(); let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee, diff --git a/crates/evm/evm/src/aliases.rs b/crates/evm/evm/src/aliases.rs index 6bb1ab1c35a..7758f0aea17 100644 --- a/crates/evm/evm/src/aliases.rs +++ b/crates/evm/evm/src/aliases.rs @@ -11,6 +11,9 @@ pub type EvmFactoryFor = /// Helper to access [`EvmFactory::Spec`] for a given [`ConfigureEvm`]. pub type SpecFor = as EvmFactory>::Spec; +/// Helper to access [`EvmFactory::BlockEnv`] for a given [`ConfigureEvm`]. +pub type BlockEnvFor = as EvmFactory>::BlockEnv; + /// Helper to access [`EvmFactory::Evm`] for a given [`ConfigureEvm`]. pub type EvmFor = as EvmFactory>::Evm; @@ -31,7 +34,7 @@ pub type ExecutionCtxFor<'a, Evm> = <::BlockExecutorFactory as BlockExecutorFactory>::ExecutionCtx<'a>; /// Type alias for [`EvmEnv`] for a given [`ConfigureEvm`]. -pub type EvmEnvFor = EvmEnv>; +pub type EvmEnvFor = EvmEnv, BlockEnvFor>; /// Helper trait to bound [`Inspector`] for a [`ConfigureEvm`]. pub trait InspectorFor: Inspector> {} diff --git a/crates/evm/evm/src/engine.rs b/crates/evm/evm/src/engine.rs index 5c721d811bc..5b46a086170 100644 --- a/crates/evm/evm/src/engine.rs +++ b/crates/evm/evm/src/engine.rs @@ -2,7 +2,7 @@ use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor}; /// [`ConfigureEvm`] extension providing methods for executing payloads. 
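
Several hunks here (the block assemblers, the payload builder) move from reading `BlockEnv` fields directly to calling accessor methods, and add `use revm::context::Block as _;`. The anonymous import is the point: trait methods only resolve when the trait is in scope, and `as _` brings it in without occupying the name `Block`. A generic sketch of the idiom; the `BlockInfo` trait and `Env` type are made up for illustration:

```rust
mod env {
    /// Made-up trait with accessor methods, standing in for a trait like
    /// revm's `Block`.
    pub trait BlockInfo {
        fn basefee(&self) -> u64;
    }

    pub struct Env {
        pub fee: u64,
    }

    impl BlockInfo for Env {
        fn basefee(&self) -> u64 {
            self.fee
        }
    }
}

// Bring the trait's methods into scope without binding the name `BlockInfo`,
// mirroring `use revm::context::Block as _;` in the diff.
use env::BlockInfo as _;

fn main() {
    let block_env = env::Env { fee: 7 };
    // Without the anonymous import above, this method call would not compile.
    assert_eq!(block_env.basefee(), 7);
}
```
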
pub trait ConfigureEngineEvm: ConfigureEvm { - /// Returns an [`EvmEnvFor`] for the given payload. + /// Returns an [`crate::EvmEnv`] for the given payload. fn evm_env_for_payload(&self, payload: &ExecutionData) -> Result, Self::Error>; /// Returns an [`ExecutionCtxFor`] for the given payload. diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 5e072f56e45..6ea7b928514 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -149,6 +149,11 @@ pub trait Executor: Sized { } /// Helper type for the output of executing a block. +/// +/// Deprecated: this type is unused within reth and will be removed in the next +/// major release. Use `reth_execution_types::BlockExecutionResult` or +/// `reth_execution_types::BlockExecutionOutput`. +#[deprecated(note = "Use reth_execution_types::BlockExecutionResult or BlockExecutionOutput")] #[derive(Debug, Clone)] pub struct ExecuteOutput { /// Receipts obtained after executing a block. @@ -198,7 +203,8 @@ pub struct BlockAssemblerInput<'a, 'b, F: BlockExecutorFactory, H = Header> { /// Configuration of EVM used when executing the block. /// /// Contains context relevant to EVM such as [`revm::context::BlockEnv`]. - pub evm_env: EvmEnv<::Spec>, + pub evm_env: + EvmEnv<::Spec, ::BlockEnv>, /// [`BlockExecutorFactory::ExecutionCtx`] used to execute the block. pub execution_ctx: F::ExecutionCtx<'a>, /// Parent block header. @@ -220,7 +226,10 @@ impl<'a, 'b, F: BlockExecutorFactory, H> BlockAssemblerInput<'a, 'b, F, H> { /// Creates a new [`BlockAssemblerInput`]. #[expect(clippy::too_many_arguments)] pub fn new( - evm_env: EvmEnv<::Spec>, + evm_env: EvmEnv< + ::Spec, + ::BlockEnv, + >, execution_ctx: F::ExecutionCtx<'a>, parent: &'a SealedHeader, transactions: Vec, @@ -460,6 +469,7 @@ where Evm: Evm< Spec = ::Spec, HaltReason = ::HaltReason, + BlockEnv = ::BlockEnv, DB = &'a mut State, >, Transaction = N::SignedTx, diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index 122edc632e7..466e1762321 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -186,6 +186,7 @@ mod tests { // wal with 1 block and tx // #[test] + #[ignore] fn decode_notification_wal() { let wal = include_bytes!("../../test-data/28.wal"); let notification: reth_exex_types::serde_bincode_compat::ExExNotification< diff --git a/crates/net/network/src/budget.rs b/crates/net/network/src/budget.rs index 824148387b4..f1d9ca87469 100644 --- a/crates/net/network/src/budget.rs +++ b/crates/net/network/src/budget.rs @@ -35,13 +35,6 @@ pub const DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS: u32 = DEFAULT_BUD // Default is 40 pending pool imports. pub const DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS: u32 = 4 * DEFAULT_BUDGET_TRY_DRAIN_STREAM; -/// Default budget to try and stream hashes of successfully imported transactions from the pool. -/// -/// Default is naturally same as the number of transactions to attempt importing, -/// [`DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS`], so 40 pool imports. -pub const DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS: u32 = - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS; - /// Polls the given stream. Breaks with `true` if there maybe is more work. #[macro_export] macro_rules! 
poll_nested_stream_with_budget { diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 99c3629b42e..4e94c5b9cd0 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -229,6 +229,8 @@ pub struct NetworkConfigBuilder { handshake: Arc, /// List of block hashes to check for required blocks. required_block_hashes: Vec, + /// Optional network id + network_id: Option, /// The header transform type. header_transform: Option>>, } @@ -272,6 +274,7 @@ impl NetworkConfigBuilder { nat: None, handshake: Arc::new(EthHandshake::default()), required_block_hashes: Vec::new(), + network_id: None, header_transform: None, } } @@ -593,6 +596,12 @@ impl NetworkConfigBuilder { self } + /// Set the optional network id. + pub const fn network_id(mut self, network_id: Option) -> Self { + self.network_id = network_id; + self + } + /// Sets the header transform type. pub fn header_transform( mut self, @@ -635,6 +644,7 @@ impl NetworkConfigBuilder { nat, handshake, required_block_hashes, + network_id, header_transform, } = self; @@ -662,7 +672,11 @@ impl NetworkConfigBuilder { hello_message.port = listener_addr.port(); // set the status - let status = UnifiedStatus::spec_builder(&chain_spec, &head); + let mut status = UnifiedStatus::spec_builder(&chain_spec, &head); + + if let Some(id) = network_id { + status.chain = id.into(); + } // set a fork filter based on the chain spec and head let fork_filter = chain_spec.fork_filter(head); diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 32f90899851..0044c1f92e1 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -924,6 +924,16 @@ impl QueuedOutgoingMessages { } } +impl Drop for QueuedOutgoingMessages { + fn drop(&mut self) { + // Ensure gauge is decremented for any remaining items to avoid metric leak on teardown. + let remaining = self.messages.len(); + if remaining > 0 { + self.count.decrement(remaining as f64); + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 9eb07e7b1a0..f4ef42523d5 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -28,8 +28,7 @@ use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_B use crate::{ budget::{ DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, cache::LruCache, duration_metered_exec, metered_poll_nested_stream_with_budget, @@ -77,7 +76,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; -use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; +use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, trace}; /// The future for importing transactions into the pool. @@ -339,7 +338,7 @@ pub struct TransactionsManager< /// - no nonce gaps /// - all dynamic fee requirements are (currently) met /// - account has enough balance to cover the transaction's gas - pending_transactions: ReceiverStream, + pending_transactions: mpsc::Receiver, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). 
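
The `Drop` impl added to `QueuedOutgoingMessages` above closes a metric leak: tearing down a session with messages still queued used to leave the queued-messages gauge permanently inflated. A dependency-free sketch of the same guard idea, with an `AtomicUsize` standing in for the real metrics gauge:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Stand-in for a metrics gauge counting queued messages.
static QUEUED_GAUGE: AtomicUsize = AtomicUsize::new(0);

struct QueuedMessages {
    messages: Vec<String>,
}

impl QueuedMessages {
    fn push(&mut self, msg: String) {
        self.messages.push(msg);
        QUEUED_GAUGE.fetch_add(1, Ordering::Relaxed);
    }
}

impl Drop for QueuedMessages {
    fn drop(&mut self) {
        // Decrement the gauge for any messages never sent, so teardown does
        // not leave the metric permanently inflated.
        let remaining = self.messages.len();
        if remaining > 0 {
            QUEUED_GAUGE.fetch_sub(remaining, Ordering::Relaxed);
        }
    }
}

fn main() {
    {
        let mut q = QueuedMessages { messages: Vec::new() };
        q.push("hello".into());
        q.push("world".into());
        assert_eq!(QUEUED_GAUGE.load(Ordering::Relaxed), 2);
    } // `q` dropped here with two unsent messages
    assert_eq!(QUEUED_GAUGE.load(Ordering::Relaxed), 0);
}
```
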
transaction_events: UnboundedMeteredReceiver>, /// How the `TransactionsManager` is configured. @@ -422,7 +421,7 @@ impl peers: Default::default(), command_tx, command_rx: UnboundedReceiverStream::new(command_rx), - pending_transactions: ReceiverStream::new(pending), + pending_transactions: pending, transaction_events: UnboundedMeteredReceiver::new( from_network, NETWORK_POOL_TRANSACTIONS_SCOPE, @@ -1529,14 +1528,16 @@ where // We don't expect this buffer to be large, since only pending transactions are // emitted here. let mut new_txs = Vec::new(); - let maybe_more_pending_txns = metered_poll_nested_stream_with_budget!( - poll_durations.acc_imported_txns, - "net::tx", - "Pending transactions stream", - DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - this.pending_transactions.poll_next_unpin(cx), - |hash| new_txs.push(hash) - ); + let maybe_more_pending_txns = match this.pending_transactions.poll_recv_many( + cx, + &mut new_txs, + SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, + ) { + Poll::Ready(count) => { + count == SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE + } + Poll::Pending => false, + }; if !new_txs.is_empty() { this.on_new_pending_transactions(new_txs); } diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 53c46f79b2a..ae4446ce51b 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -1,7 +1,6 @@ //! Pool component for the node builder. use crate::{BuilderContext, FullNodeTypes}; - use alloy_primitives::Address; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; @@ -167,14 +166,12 @@ where pub fn create_blob_store( ctx: &BuilderContext, ) -> eyre::Result { - let data_dir = ctx.config().datadir(); - Ok(reth_transaction_pool::blobstore::DiskFileBlobStore::open( - data_dir.blobstore(), - Default::default(), - )?) + let cache_size = Some(ctx.config().txpool.max_cached_entries); + create_blob_store_with_cache(ctx, cache_size) } -/// Create blob store with custom cache size configuration. +/// Create blob store with custom cache size configuration for how many blobs should be cached in +/// memory. 
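
The `TransactionsManager` hunk above swaps a budgeted per-item stream poll for `tokio::sync::mpsc::Receiver::poll_recv_many`, draining up to `SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE` hashes in one call and treating a full batch as a hint that more may be waiting. A minimal sketch of that batching, assuming tokio 1.37+ (where `poll_recv_many` exists) with the `sync` and `macros` features:

```rust
use std::future::poll_fn;
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u64>(16);
    for i in 0..5 {
        tx.send(i).await.unwrap();
    }
    drop(tx);

    const BUDGET: usize = 3;
    let mut batch = Vec::new();

    // Drain up to BUDGET items in a single poll; a full batch hints that
    // more items may still be queued, mirroring the manager's logic.
    let count = poll_fn(|cx| rx.poll_recv_many(cx, &mut batch, BUDGET)).await;
    let maybe_more = count == BUDGET;

    assert_eq!(batch, vec![0, 1, 2]);
    assert!(maybe_more);
}
```

A full batch does not prove more items exist; it only schedules another drain, which matches how the budget macros interpret their return value.
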
pub fn create_blob_store_with_cache( ctx: &BuilderContext, cache_size: Option, diff --git a/crates/node/builder/src/engine_api_ext.rs b/crates/node/builder/src/engine_api_ext.rs index 936a2e19051..33d1d3e63ad 100644 --- a/crates/node/builder/src/engine_api_ext.rs +++ b/crates/node/builder/src/engine_api_ext.rs @@ -5,7 +5,6 @@ use crate::rpc::EngineApiBuilder; use eyre::Result; use reth_node_api::{AddOnsContext, FullNodeComponents}; -use reth_rpc_api::IntoEngineApiRpcModule; /// Provides access to an `EngineApi` instance with a callback #[derive(Debug)] @@ -27,7 +26,7 @@ impl EngineApiBuilder for EngineApiExt where B: EngineApiBuilder, N: FullNodeComponents, - B::EngineApi: IntoEngineApiRpcModule + Send + Sync + Clone + 'static, + B::EngineApi: Clone, F: FnOnce(B::EngineApi) + Send + Sync + 'static, { type EngineApi = B::EngineApi; diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 3a35c4183f1..2d1fb6924d8 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -41,12 +41,10 @@ use eyre::Context; use rayon::ThreadPoolBuilder; use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; -use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitStorageError}; -use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; -use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; +use reth_evm::ConfigureEvm; use reth_exex::ExExManagerHandle; use reth_fs_util as fs; use reth_network_p2p::headers::client::HeadersClient; @@ -67,25 +65,19 @@ use reth_node_metrics::{ }; use reth_provider::{ providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, - ProviderResult, StageCheckpointReader, StaticFileProviderFactory, + BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, ProviderResult, + StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; use reth_rpc_layer::JwtSecret; -use reth_stages::{ - sets::DefaultStages, stages::EraImportSource, MetricEvent, PipelineBuilder, PipelineTarget, - StageId, -}; +use reth_stages::{stages::EraImportSource, MetricEvent}; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; use reth_transaction_pool::TransactionPool; use std::{sync::Arc, thread::available_parallelism}; -use tokio::sync::{ - mpsc::{unbounded_channel, UnboundedSender}, - oneshot, watch, -}; +use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; use futures::{future::Either, stream, Stream, StreamExt}; use reth_node_ethstats::EthStatsService; @@ -466,70 +458,13 @@ where N: ProviderNodeTypes, Evm: ConfigureEvm + 'static, { - let factory = ProviderFactory::new( + Ok(ProviderFactory::new( self.right().clone(), self.chain_spec(), StaticFileProvider::read_write(self.data_dir().static_files())?, ) .with_prune_modes(self.prune_modes()) - .with_static_files_metrics(); - - let has_receipt_pruning = - self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); - - // Check for consistency between database and static files. 
If it fails, it unwinds to - // the first block that's consistent between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? - { - // Highly unlikely to happen, and given its destructive nature, it's better to panic - // instead. - assert_ne!( - unwind_target, - PipelineTarget::Unwind(0), - "A static file <> database inconsistency was found that would trigger an unwind to block 0" - ); - - info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); - - let (_tip_tx, tip_rx) = watch::channel(B256::ZERO); - - // Builds an unwind-only pipeline - let pipeline = PipelineBuilder::default() - .add_stages(DefaultStages::new( - factory.clone(), - tip_rx, - Arc::new(NoopConsensus::default()), - NoopHeaderDownloader::default(), - NoopBodiesDownloader::default(), - NoopEvmConfig::::default(), - self.toml_config().stages.clone(), - self.prune_modes(), - None, - )) - .build( - factory.clone(), - StaticFileProducer::new(factory.clone(), self.prune_modes()), - ); - - // Unwinds to block - let (tx, rx) = oneshot::channel(); - - // Pipeline should be run as blocking and panic if it fails. - self.task_executor().spawn_critical_blocking( - "pipeline task", - Box::pin(async move { - let (_, result) = pipeline.run_as_fut(Some(unwind_target)).await; - let _ = tx.send(result); - }), - ); - rx.await?.inspect_err(|err| { - error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") - })?; - } - - Ok(factory) + .with_static_files_metrics()) } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. @@ -582,7 +517,7 @@ where // ensure recorder runs upkeep periodically install_prometheus_recorder().spawn_upkeep(); - let listen_addr = self.node_config().metrics; + let listen_addr = self.node_config().metrics.prometheus; if let Some(addr) = listen_addr { info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); let config = MetricServerConfig::new( @@ -852,21 +787,6 @@ where &self.node_adapter().provider } - /// Returns the initial backfill to sync to at launch. - /// - /// This returns the configured `debug.tip` if set, otherwise it will check if backfill was - /// previously interrupted and returns the block hash of the last checkpoint, see also - /// [`Self::check_pipeline_consistency`] - pub fn initial_backfill_target(&self) -> ProviderResult> { - let mut initial_target = self.node_config().debug.tip; - - if initial_target.is_none() { - initial_target = self.check_pipeline_consistency()?; - } - - Ok(initial_target) - } - /// Returns true if the node should terminate after the initial backfill run. /// /// This is the case if any of these configs are set: @@ -880,7 +800,7 @@ where /// /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past /// bedrock height) - fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { + pub fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { if self.chain_spec().is_optimism() && !self.is_dev() && self.chain_id() == Chain::optimism_mainnet() @@ -898,54 +818,6 @@ where Ok(()) } - /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less - /// than the checkpoint of the first stage). 
- /// - /// This will return the pipeline target if: - /// * the pipeline was interrupted during its previous run - /// * a new stage was added - /// * stage data was dropped manually through `reth stage drop ...` - /// - /// # Returns - /// - /// A target block hash if the pipeline is inconsistent, otherwise `None`. - pub fn check_pipeline_consistency(&self) -> ProviderResult> { - // If no target was provided, check if the stages are congruent - check if the - // checkpoint of the last stage matches the checkpoint of the first. - let first_stage_checkpoint = self - .blockchain_db() - .get_stage_checkpoint(*StageId::ALL.first().unwrap())? - .unwrap_or_default() - .block_number; - - // Skip the first stage as we've already retrieved it and comparing all other checkpoints - // against it. - for stage_id in StageId::ALL.iter().skip(1) { - let stage_checkpoint = self - .blockchain_db() - .get_stage_checkpoint(*stage_id)? - .unwrap_or_default() - .block_number; - - // If the checkpoint of any stage is less than the checkpoint of the first stage, - // retrieve and return the block hash of the latest header and use it as the target. - if stage_checkpoint < first_stage_checkpoint { - debug!( - target: "consensus::engine", - first_stage_checkpoint, - inconsistent_stage_id = %stage_id, - inconsistent_stage_checkpoint = stage_checkpoint, - "Pipeline sync progress is inconsistent" - ); - return self.blockchain_db().block_hash(first_stage_checkpoint); - } - } - - self.ensure_chain_specific_db_checks()?; - - Ok(None) - } - /// Expire the pre-merge transactions if the node is configured to do so and the chain has a /// merge block. /// diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index c5b639cbb89..d7b139c4d50 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -126,9 +126,6 @@ impl EngineNodeLauncher { })? .with_components(components_builder, on_component_initialized).await?; - // Try to expire pre-merge transaction history if configured - ctx.expire_pre_merge_transactions()?; - // spawn exexs if any let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?; @@ -150,7 +147,7 @@ impl EngineNodeLauncher { let consensus = Arc::new(ctx.components().consensus().clone()); - let pipeline = build_networked_pipeline( + let mut pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), consensus.clone(), @@ -166,7 +163,18 @@ impl EngineNodeLauncher { )?; // The new engine writes directly to static files. This ensures that they're up to the tip. - pipeline.move_to_static_files()?; + pipeline.ensure_static_files_consistency().await?; + + // Try to expire pre-merge transaction history if configured + ctx.expire_pre_merge_transactions()?; + + let initial_target = if let Some(tip) = ctx.node_config().debug.tip { + Some(tip) + } else { + pipeline.initial_backfill_target()? 
+ }; + + ctx.ensure_chain_specific_db_checks()?; let pipeline_events = pipeline.events(); @@ -258,7 +266,6 @@ impl EngineNodeLauncher { let add_ons_handle = add_ons.launch_add_ons(add_ons_ctx).await?; // Run consensus engine to completion - let initial_target = ctx.initial_backfill_target()?; let mut built_payloads = ctx .components() .payload_builder_handle() diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 2240fa98837..bf784b50703 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -77,6 +77,8 @@ tokio.workspace = true # Features for vergen to generate correct env vars jemalloc = ["reth-cli-util/jemalloc"] asm-keccak = ["alloy-primitives/asm-keccak"] +# Feature to enable opentelemetry export +otlp = ["reth-tracing/otlp"] [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 88179a6b40e..6b678b5789b 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -108,6 +108,16 @@ pub struct EngineArgs { /// See `TreeConfig::unwind_canonical_header` for more details. #[arg(long = "engine.allow-unwind-canonical-header", default_value = "false")] pub allow_unwind_canonical_header: bool, + + /// Configure the number of storage proof workers in the Tokio blocking pool. + /// If not specified, defaults to 2x available parallelism, clamped between 2 and 64. + #[arg(long = "engine.storage-worker-count")] + pub storage_worker_count: Option, + + /// Configure the number of account proof workers in the Tokio blocking pool. + /// If not specified, defaults to the same count as storage workers. + #[arg(long = "engine.account-worker-count")] + pub account_worker_count: Option, } #[allow(deprecated)] @@ -134,6 +144,8 @@ impl Default for EngineArgs { state_root_fallback: false, always_process_payload_attributes_on_canonical_head: false, allow_unwind_canonical_header: false, + storage_worker_count: None, + account_worker_count: None, } } } @@ -141,7 +153,7 @@ impl Default for EngineArgs { impl EngineArgs { /// Creates a [`TreeConfig`] from the engine arguments. pub fn tree_config(&self) -> TreeConfig { - TreeConfig::default() + let mut config = TreeConfig::default() .with_persistence_threshold(self.persistence_threshold) .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) @@ -159,7 +171,17 @@ impl EngineArgs { .with_always_process_payload_attributes_on_canonical_head( self.always_process_payload_attributes_on_canonical_head, ) - .with_unwind_canonical_header(self.allow_unwind_canonical_header) + .with_unwind_canonical_header(self.allow_unwind_canonical_header); + + if let Some(count) = self.storage_worker_count { + config = config.with_storage_worker_count(count); + } + + if let Some(count) = self.account_worker_count { + config = config.with_account_worker_count(count); + } + + config } } diff --git a/crates/node/core/src/args/log.rs b/crates/node/core/src/args/log.rs index 1236984fac0..99fefc11445 100644 --- a/crates/node/core/src/args/log.rs +++ b/crates/node/core/src/args/log.rs @@ -70,6 +70,7 @@ pub struct LogArgs { default_value_t = ColorMode::Always )] pub color: ColorMode, + /// The verbosity settings for the tracer. 
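
Per the new `--engine.storage-worker-count` and `--engine.account-worker-count` docs above, the storage default is twice the available parallelism clamped to [2, 64]. The actual reth helper is not part of this diff, so the following only restates the documented formula as code:

```rust
use std::thread::available_parallelism;

/// Documented default: 2x available parallelism, clamped between 2 and 64.
fn default_storage_worker_count() -> usize {
    let parallelism = available_parallelism().map(|n| n.get()).unwrap_or(1);
    (parallelism * 2).clamp(2, 64)
}

fn main() {
    let workers = default_storage_worker_count();
    assert!((2..=64).contains(&workers));
    println!("default storage proof workers: {workers}");
}
```

The `tree_config` hunk then only overrides these defaults when the flags are actually present, hence the two `if let Some(count)` guards.
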
#[command(flatten)] pub verbosity: Verbosity, diff --git a/crates/node/core/src/args/metric.rs b/crates/node/core/src/args/metric.rs new file mode 100644 index 00000000000..d46018b8e77 --- /dev/null +++ b/crates/node/core/src/args/metric.rs @@ -0,0 +1,13 @@ +use clap::Parser; +use reth_cli_util::parse_socket_address; +use std::net::SocketAddr; + +/// Metrics configuration. +#[derive(Debug, Clone, Default, Parser)] +pub struct MetricArgs { + /// Enable Prometheus metrics. + /// + /// The metrics will be served at the given interface and port. + #[arg(long = "metrics", alias = "metrics.prometheus", value_name = "PROMETHEUS", value_parser = parse_socket_address, help_heading = "Metrics")] + pub prometheus: Option<SocketAddr>, +} diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 6799fe418dc..54e77740146 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -24,6 +24,14 @@ pub use database::DatabaseArgs; mod log; pub use log::{ColorMode, LogArgs, Verbosity}; +/// `TraceArgs` for tracing and spans support mod trace; +pub use trace::TraceArgs; + +/// `MetricArgs` to configure metrics. +mod metric; +pub use metric::MetricArgs; + /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; pub use payload_builder::PayloadBuilderArgs; diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index a32f14edd41..52ff52b1cee 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -184,6 +184,10 @@ pub struct NetworkArgs { /// Peers that don't have these blocks will be filtered out. #[arg(long = "required-block-hashes", value_delimiter = ',')] pub required_block_hashes: Vec<B256>, + + /// Optional network ID to override the chain specification's network ID for P2P connections + #[arg(long)] + pub network_id: Option<u64>, } impl NetworkArgs { @@ -297,6 +301,7 @@ impl NetworkArgs { )) .disable_tx_gossip(self.disable_tx_gossip) .required_block_hashes(self.required_block_hashes.clone()) + .network_id(self.network_id) } /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -371,6 +376,7 @@ impl Default for NetworkArgs { disable_tx_gossip: false, propagation_mode: TransactionPropagationMode::Sqrt, required_block_hashes: vec![], + network_id: None, } } } diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs new file mode 100644 index 00000000000..751ab556ac8 --- /dev/null +++ b/crates/node/core/src/args/trace.rs @@ -0,0 +1,61 @@ +//! Opentelemetry tracing configuration through CLI args. + +use clap::Parser; +use eyre::{ensure, WrapErr}; +use tracing::Level; +use url::Url; + +/// CLI arguments for configuring `Opentelemetry` trace and span export. +#[derive(Debug, Clone, Parser)] +pub struct TraceArgs { + /// Enable `Opentelemetry` tracing export to an OTLP endpoint. + /// + /// If no value is provided, defaults to `http://localhost:4318/v1/traces`. + /// + /// Example: --tracing-otlp=http://collector:4318/v1/traces + #[arg( + long = "tracing-otlp", + global = true, + value_name = "URL", + num_args = 0..=1, + default_missing_value = "http://localhost:4318/v1/traces", + require_equals = true, + value_parser = parse_otlp_endpoint, + help_heading = "Tracing" + )] + pub otlp: Option<Url>, + + /// Set the minimum log level for OTLP traces. + /// + /// Valid values: ERROR, WARN, INFO, DEBUG, TRACE + /// + /// Defaults to TRACE if not specified.
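
A runnable approximation of the new `MetricArgs` using plain clap with std's `SocketAddr` parser (reth's `parse_socket_address` accepts more input forms than std's parser, which this sketch does not reproduce; clap with the `derive` feature is assumed):

```rust
use clap::Parser;
use std::net::SocketAddr;

/// Approximation of the new MetricArgs, std parser instead of reth's.
#[derive(Debug, Parser)]
struct MetricArgs {
    /// Enable Prometheus metrics, served at the given interface and port.
    #[arg(long = "metrics", alias = "metrics.prometheus", value_name = "PROMETHEUS")]
    prometheus: Option<SocketAddr>,
}

fn main() {
    let args = MetricArgs::parse_from(["demo", "--metrics", "127.0.0.1:9001"]);
    assert_eq!(args.prometheus, Some("127.0.0.1:9001".parse().unwrap()));

    // The `metrics.prometheus` alias accepts the same value.
    let args = MetricArgs::parse_from(["demo", "--metrics.prometheus", "127.0.0.1:9002"]);
    assert!(args.prometheus.is_some());
}
```
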
+ #[arg( + long = "tracing-otlp-level", + global = true, + value_name = "LEVEL", + default_value = "TRACE", + help_heading = "Tracing" + )] + pub otlp_level: Level, +} + +impl Default for TraceArgs { + fn default() -> Self { + Self { otlp: None, otlp_level: Level::TRACE } + } +} + +// Parses and validates an OTLP endpoint URL. +fn parse_otlp_endpoint(arg: &str) -> eyre::Result<Url> { + let url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?; + + // The OTLP URL must end with `/v1/traces` per the OTLP specification. + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); + + Ok(url) +} diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 96fa8cc8dfa..94dbecb649c 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -27,21 +27,17 @@ use reth_transaction_pool::TransactionPool; use serde::{de::DeserializeOwned, Serialize}; use std::{ fs, - net::SocketAddr, path::{Path, PathBuf}, sync::Arc, }; use tracing::*; -use crate::args::EraArgs; +use crate::args::{EraArgs, MetricArgs}; pub use reth_engine_primitives::{ DEFAULT_MAX_PROOF_TASK_CONCURRENCY, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - DEFAULT_RESERVED_CPU_CORES, + DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; -/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. -pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; - /// Default size of cross-block cache in megabytes. pub const DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB: u64 = 4 * 1024; @@ -103,10 +99,8 @@ pub struct NodeConfig { /// Possible values are either a built-in chain or the path to a chain specification file. pub chain: Arc, - /// Enable Prometheus metrics. - /// - /// The metrics will be served at the given interface and port. - pub metrics: Option<SocketAddr>, + /// Configures metrics export endpoints. + pub metrics: MetricArgs, /// Add a new instance of a node.
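
Usage-wise, the validator added above accepts the default collector endpoint and any URL ending in `/v1/traces`, and rejects everything else. A small demonstration, assuming the `url` and `eyre` crates as in the new module:

```rust
use eyre::{ensure, WrapErr};
use url::Url;

// Same shape as the parser added above: URL syntax check plus the OTLP
// `/v1/traces` path requirement.
fn parse_otlp_endpoint(arg: &str) -> eyre::Result<Url> {
    let url = Url::parse(arg).wrap_err("Invalid URL for OTLP trace output")?;
    ensure!(
        url.path().ends_with("/v1/traces"),
        "OTLP trace endpoint must end with /v1/traces, got path: {}",
        url.path()
    );
    Ok(url)
}

fn main() {
    assert!(parse_otlp_endpoint("http://localhost:4318/v1/traces").is_ok());
    assert!(parse_otlp_endpoint("http://collector:4318/v1/traces").is_ok());
    // Wrong signal path: this endpoint would be for metrics, not traces.
    assert!(parse_otlp_endpoint("http://localhost:4318/v1/metrics").is_err());
    // Not a URL at all.
    assert!(parse_otlp_endpoint("not a url").is_err());
}
```
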
/// @@ -171,7 +165,7 @@ impl NodeConfig { Self { config: None, chain, - metrics: None, + metrics: MetricArgs::default(), instance: None, network: NetworkArgs::default(), rpc: RpcServerArgs::default(), @@ -225,8 +219,8 @@ impl NodeConfig { } /// Set the metrics address for the node - pub const fn with_metrics(mut self, metrics: SocketAddr) -> Self { - self.metrics = Some(metrics); + pub const fn with_metrics(mut self, metrics: MetricArgs) -> Self { + self.metrics = metrics; self } @@ -517,7 +511,7 @@ impl Clone for NodeConfig { Self { chain: self.chain.clone(), config: self.config.clone(), - metrics: self.metrics, + metrics: self.metrics.clone(), instance: self.instance, network: self.network.clone(), rpc: self.rpc.clone(), diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 422da3b883e..6ed24ca5823 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -74,6 +74,11 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } [features] +default = ["otlp"] + +# OpenTelemetry feature to activate OTLP trace export +otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] + asm-keccak = [ "alloy-primitives/asm-keccak", "reth-node-core/asm-keccak", diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 1e9f7960ad1..891578cbe24 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -116,7 +116,18 @@ where /// If file logging is enabled, this function stores guard to the struct. pub fn init_tracing(&mut self) -> Result<()> { if self.guard.is_none() { - let layers = self.layers.take().unwrap_or_default(); + let mut layers = self.layers.take().unwrap_or_default(); + + #[cfg(feature = "otlp")] + if let Some(output_type) = &self.cli.traces.otlp { + info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", output_type); + layers.with_span_layer( + "reth".to_string(), + output_type.clone(), + self.cli.traces.otlp_level, + )?; + } + self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); } diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index f910df244eb..7dd985ce25f 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -48,7 +48,10 @@ use reth_cli_commands::launcher::FnLauncher; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::{args::LogArgs, version::version_metadata}; +use reth_node_core::{ + args::{LogArgs, TraceArgs}, + version::version_metadata, +}; use reth_optimism_node::args::RollupArgs; // This allows us to manually enable node metrics features, required for proper jemalloc metric @@ -73,6 +76,10 @@ pub struct Cli< #[command(flatten)] pub logs: LogArgs, + /// The tracing configuration for the CLI.
+ #[command(flatten)] + pub traces: TraceArgs, + /// Type marker for the RPC module validator #[arg(skip)] _phantom: PhantomData, @@ -198,6 +205,7 @@ mod test { "10000", "--metrics", "9003", + "--tracing-otlp=http://localhost:4318/v1/traces", "--log.file.max-size", "100", "--builder.gaslimit", diff --git a/crates/optimism/consensus/src/validation/isthmus.rs b/crates/optimism/consensus/src/validation/isthmus.rs index 64d45eae5c8..4703e10869e 100644 --- a/crates/optimism/consensus/src/validation/isthmus.rs +++ b/crates/optimism/consensus/src/validation/isthmus.rs @@ -4,7 +4,6 @@ use crate::OpConsensusError; use alloy_consensus::BlockHeader; use alloy_primitives::{address, Address, B256}; use alloy_trie::EMPTY_ROOT_HASH; -use core::fmt::Debug; use reth_storage_api::{errors::ProviderResult, StorageRootProvider}; use reth_trie_common::HashedStorage; use revm::database::BundleState; @@ -72,7 +71,7 @@ pub fn verify_withdrawals_root( ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?; @@ -110,7 +109,7 @@ pub fn verify_withdrawals_root_prehashed( ) -> Result<(), OpConsensusError> where DB: StorageRootProvider, - H: BlockHeader + core::fmt::Debug, + H: BlockHeader, { let header_storage_root = header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?; diff --git a/crates/optimism/evm/src/build.rs b/crates/optimism/evm/src/build.rs index 087b7f10046..edc877a9a5d 100644 --- a/crates/optimism/evm/src/build.rs +++ b/crates/optimism/evm/src/build.rs @@ -14,6 +14,7 @@ use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus}; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::DepositReceipt; use reth_primitives_traits::{Receipt, SignedTransaction}; +use revm::context::Block as _; /// Block builder for Optimism. 
#[derive(Debug)] @@ -53,7 +54,7 @@ impl OpBlockAssembler { } = input; let ctx = ctx.into(); - let timestamp = evm_env.block_env.timestamp.saturating_to(); + let timestamp = evm_env.block_env.timestamp().saturating_to(); let transactions_root = proofs::calculate_transaction_root(&transactions); let receipts_root = @@ -88,19 +89,19 @@ impl OpBlockAssembler { let header = Header { parent_hash: ctx.parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: evm_env.block_env.beneficiary, + beneficiary: evm_env.block_env.beneficiary(), state_root, transactions_root, receipts_root, withdrawals_root, logs_bloom, timestamp, - mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(), + mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(), nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(evm_env.block_env.basefee), - number: evm_env.block_env.number.saturating_to(), - gas_limit: evm_env.block_env.gas_limit, - difficulty: evm_env.block_env.difficulty, + base_fee_per_gas: Some(evm_env.block_env.basefee()), + number: evm_env.block_env.number().saturating_to(), + gas_limit: evm_env.block_env.gas_limit(), + difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, extra_data: ctx.extra_data, parent_beacon_block_root: ctx.parent_beacon_block_root, diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index a538c8d8690..4165221c987 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -88,10 +88,12 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result Result + FromTxWithEncoded - + TransactionEnv, + + TransactionEnv + + OpTxEnv, Precompiles = PrecompilesMap, Spec = OpSpecId, + BlockEnv = BlockEnv, > + Debug, Self: Send + Sync + Unpin + Clone + 'static, { diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index e818e9cb538..11647039930 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -4,7 +4,7 @@ pub use payload::{ ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, Metadata, }; -pub use service::FlashBlockService; +pub use service::{FlashBlockBuildInfo, FlashBlockService}; pub use ws::{WsConnect, WsFlashBlockStream}; mod consensus; @@ -12,7 +12,7 @@ pub use consensus::FlashBlockConsensusClient; mod payload; pub use payload::PendingFlashBlock; mod sequence; -pub use sequence::FlashBlockCompleteSequence; +pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence}; mod service; mod worker; @@ -28,3 +28,6 @@ pub type PendingBlockRx = tokio::sync::watch::Receiver; + +/// Receiver that signals whether a [`FlashBlock`] is currently being built. +pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index 087f97db7be..59d4cfecbcd 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -13,7 +13,7 @@ const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128; /// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. 
diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs
index 087f97db7be..59d4cfecbcd 100644
--- a/crates/optimism/flashblocks/src/sequence.rs
+++ b/crates/optimism/flashblocks/src/sequence.rs
@@ -13,7 +13,7 @@ const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128;

 /// An ordered B-tree keeping track of a sequence of [`FlashBlock`]s by their indices.
 #[derive(Debug)]
-pub(crate) struct FlashBlockPendingSequence<T> {
+pub struct FlashBlockPendingSequence<T> {
     /// tracks the individual flashblocks in order
     ///
     /// With a blocktime of 2s and flashblock tick-rate of 200ms plus one extra flashblock per new
@@ -29,7 +29,8 @@ impl<T> FlashBlockPendingSequence<T>
 where
     T: SignedTransaction,
 {
-    pub(crate) fn new() -> Self {
+    /// Create a new pending sequence.
+    pub fn new() -> Self {
         // Note: if the channel is full, send will not block but rather overwrite the oldest
         // messages. Order is preserved.
         let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE);
@@ -37,7 +38,7 @@ where
     }

     /// Gets a subscriber to the flashblock sequences produced.
-    pub(crate) fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx {
+    pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx {
         self.block_broadcaster.subscribe()
     }

@@ -70,7 +71,7 @@ where
     /// Inserts a new block into the sequence.
     ///
     /// A [`FlashBlock`] with index 0 resets the set.
-    pub(crate) fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> {
+    pub fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> {
         if flashblock.index == 0 {
             trace!(number=%flashblock.block_number(), "Tracking new flashblock sequence");

@@ -93,7 +94,7 @@ where
     }

     /// Set state root
-    pub(crate) const fn set_state_root(&mut self, state_root: Option<B256>) {
+    pub const fn set_state_root(&mut self, state_root: Option<B256>) {
         self.state_root = state_root;
     }

@@ -103,9 +104,7 @@ where
     /// the sequence
     ///
     /// Note: flashblocks start at `index 0`.
-    pub(crate) fn ready_transactions(
-        &self,
-    ) -> impl Iterator<Item = WithEncoded<Recovered<T>>> + '_ {
+    pub fn ready_transactions(&self) -> impl Iterator<Item = WithEncoded<Recovered<T>>> + '_ {
         self.inner
             .values()
             .enumerate()
@@ -117,31 +116,40 @@ where
     }

     /// Returns the first block number
-    pub(crate) fn block_number(&self) -> Option<u64> {
+    pub fn block_number(&self) -> Option<u64> {
         Some(self.inner.values().next()?.block().metadata.block_number)
     }

     /// Returns the payload base of the first tracked flashblock.
-    pub(crate) fn payload_base(&self) -> Option<ExecutionPayloadBaseV1> {
+    pub fn payload_base(&self) -> Option<ExecutionPayloadBaseV1> {
         self.inner.values().next()?.block().base.clone()
     }

     /// Returns the number of tracked flashblocks.
-    pub(crate) fn count(&self) -> usize {
+    pub fn count(&self) -> usize {
         self.inner.len()
     }

     /// Returns the reference to the last flashblock.
-    pub(crate) fn last_flashblock(&self) -> Option<&FlashBlock> {
+    pub fn last_flashblock(&self) -> Option<&FlashBlock> {
         self.inner.last_key_value().map(|(_, b)| &b.block)
     }

     /// Returns the current/latest flashblock index in the sequence
-    pub(crate) fn index(&self) -> Option<u64> {
+    pub fn index(&self) -> Option<u64> {
         Some(self.inner.values().last()?.block().index)
     }
 }

+impl<T> Default for FlashBlockPendingSequence<T>
+where
+    T: SignedTransaction,
+{
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
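For illustration only, a rough sketch of driving the now-public sequence type directly; the flashblock inputs and the OP signed-transaction type parameter are assumptions for the example, not part of this change:

use reth_optimism_flashblocks::{FlashBlock, FlashBlockPendingSequence};
use reth_optimism_primitives::OpTransactionSigned;

fn ready_tx_count(flashblocks: Vec<FlashBlock>) -> eyre::Result<usize> {
    let mut seq = FlashBlockPendingSequence::<OpTransactionSigned>::new();
    for fb in flashblocks {
        // An index-0 flashblock resets the tracked sequence.
        seq.insert(fb)?;
    }
    // Only transactions of consecutive indices starting at index 0 are "ready".
    Ok(seq.ready_transactions().count())
}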

 /// A complete sequence of flashblocks, often corresponding to a full block.
 /// Ensure invariants of a complete flashblocks sequence.
 #[derive(Debug, Clone)]
diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs
index f4cf7f18450..7e442470d98 100644
--- a/crates/optimism/flashblocks/src/service.rs
+++ b/crates/optimism/flashblocks/src/service.rs
@@ -1,7 +1,8 @@
 use crate::{
     sequence::FlashBlockPendingSequence,
     worker::{BuildArgs, FlashBlockBuilder},
-    ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, PendingFlashBlock,
+    ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx,
+    PendingFlashBlock,
 };
 use alloy_eips::eip2718::WithEncoded;
 use alloy_primitives::B256;
@@ -21,7 +22,10 @@ use std::{
     task::{ready, Context, Poll},
     time::Instant,
 };
-use tokio::{pin, sync::oneshot};
+use tokio::{
+    pin,
+    sync::{oneshot, watch},
+};
 use tracing::{debug, trace, warn};

 pub(crate) const FB_STATE_ROOT_FROM_INDEX: usize = 9;
@@ -48,11 +52,25 @@ pub struct FlashBlockService<
     /// when fb received on top of the same block. Avoid redundant I/O across multiple
     /// executions within the same block.
     cached_state: Option<(B256, CachedReads)>,
+    /// Signals when a block build is in progress
+    in_progress_tx: watch::Sender<Option<FlashBlockBuildInfo>>,
+
     /// `FlashBlock` service's metrics
     metrics: FlashBlockServiceMetrics,
     /// Enable state root calculation from flashblock with index [`FB_STATE_ROOT_FROM_INDEX`]
     compute_state_root: bool,
 }

+/// Information about a flashblock currently being built
+#[derive(Debug, Clone, Copy)]
+pub struct FlashBlockBuildInfo {
+    /// Parent block hash
+    pub parent_hash: B256,
+    /// Flashblock index within the current block's sequence
+    pub index: u64,
+    /// Block number of the flashblock being built.
+    pub block_number: u64,
+}
+
 impl FlashBlockService
 where
     N: NodePrimitives,
@@ -73,6 +91,7 @@ where
 {
     /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream.
     pub fn new(rx: S, evm_config: EvmConfig, provider: Provider, spawner: TaskExecutor) -> Self {
+        let (in_progress_tx, _) = watch::channel(None);
         Self {
             rx,
             current: None,
@@ -83,6 +102,7 @@ where
             spawner,
             job: None,
             cached_state: None,
+            in_progress_tx,
             metrics: FlashBlockServiceMetrics::default(),
             compute_state_root: false,
         }
@@ -99,6 +119,11 @@ where
         self.blocks.subscribe_block_sequence()
     }

+    /// Returns a receiver that signals when a flashblock is being built.
+ pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { + self.in_progress_tx.subscribe() + } + /// Drives the services and sends new blocks to the receiver /// /// Note: this should be spawned @@ -218,6 +243,8 @@ where }; // reset job this.job.take(); + // No build in progress + let _ = this.in_progress_tx.send(None); if let Some((now, result)) = result { match result { @@ -293,6 +320,13 @@ where if let Some(args) = this.build_args() { let now = Instant::now(); + let fb_info = FlashBlockBuildInfo { + parent_hash: args.base.parent_hash, + index: args.last_flashblock_index, + block_number: args.base.block_number, + }; + // Signal that a flashblock build has started with build metadata + let _ = this.in_progress_tx.send(Some(fb_info)); let (tx, rx) = oneshot::channel(); let builder = this.builder.clone(); diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index e0437a5f655..b495fdb47ce 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -19,7 +19,7 @@ use reth_optimism_node::{args::RollupArgs, OpEvmConfig, OpExecutorBuilder, OpNod use reth_optimism_primitives::OpPrimitives; use reth_provider::providers::BlockchainProvider; use revm::{ - context::{Cfg, ContextTr, TxEnv}, + context::{BlockEnv, Cfg, ContextTr, TxEnv}, context_interface::result::EVMError, inspector::NoOpInspector, interpreter::interpreter::EthInterpreter, @@ -94,6 +94,7 @@ fn test_setup_custom_precompiles() { EVMError; type HaltReason = OpHaltReason; type Spec = OpSpecId; + type BlockEnv = BlockEnv; type Precompiles = PrecompilesMap; fn create_evm( diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 1d73464e178..ecc7a400349 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -567,9 +567,9 @@ where } /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { + pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes { BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|p| p as u64), ) } @@ -659,10 +659,10 @@ where Transaction: PoolTransaction> + OpPooledTx, >, ) -> Result, PayloadBuilderError> { - let block_gas_limit = builder.evm_mut().block().gas_limit; + let block_gas_limit = builder.evm_mut().block().gas_limit(); let block_da_limit = self.da_config.max_da_block_size(); let tx_da_limit = self.da_config.max_da_tx_size(); - let base_fee = builder.evm_mut().block().basefee; + let base_fee = builder.evm_mut().block().basefee(); while let Some(tx) = best_txs.next(()) { let interop = tx.interop_deadline(); diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index b7ce75c51b2..4e853984ac9 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,5 +1,4 @@ use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, FromEvmError, RpcConvert, @@ -9,12 +8,7 @@ impl EthCall for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -22,12 +16,7 @@ impl EstimateCall for OpEthApi where N: RpcNodeCore, OpEthApiError: 
FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -35,12 +24,7 @@ impl Call for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = OpEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index fdd06d224bc..e10c5152473 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -13,7 +13,7 @@ use crate::{ OpEthApiError, SequencerClient, }; use alloy_consensus::BlockHeader; -use alloy_primitives::U256; +use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; @@ -23,8 +23,8 @@ use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ - ExecutionPayloadBaseV1, FlashBlockCompleteSequenceRx, FlashBlockService, PendingBlockRx, - WsFlashBlockStream, + ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, + InProgressFlashBlockRx, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -43,10 +43,18 @@ use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc, time::Instant}; -use tokio::sync::watch; +use std::{ + fmt::{self, Formatter}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; +use tokio::{sync::watch, time}; use tracing::info; +/// Maximum duration to wait for a fresh flashblock when one is being built. +const MAX_FLASHBLOCK_WAIT_DURATION: Duration = Duration::from_millis(50); + /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner; @@ -79,6 +87,7 @@ impl OpEthApi { min_suggested_priority_fee: U256, pending_block_rx: Option>, flashblock_rx: Option, + in_progress_rx: Option, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, @@ -86,6 +95,7 @@ impl OpEthApi { min_suggested_priority_fee, pending_block_rx, flashblock_rx, + in_progress_rx, }); Self { inner } } @@ -109,15 +119,57 @@ impl OpEthApi { self.inner.flashblock_rx.as_ref().map(|rx| rx.resubscribe()) } + /// Returns information about the flashblock currently being built, if any. + fn flashblock_build_info(&self) -> Option { + self.inner.in_progress_rx.as_ref().and_then(|rx| *rx.borrow()) + } + + /// Extracts pending block if it matches the expected parent hash. + fn extract_matching_block( + &self, + block: Option<&PendingFlashBlock>, + parent_hash: B256, + ) -> Option> { + block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) + } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. pub const fn builder() -> OpEthApiBuilder { OpEthApiBuilder::new() } + /// Awaits a fresh flashblock if one is being built, otherwise returns current. 
+    async fn flashblock(
+        &self,
+        parent_hash: B256,
+    ) -> eyre::Result<Option<PendingBlock<N::Primitives>>> {
+        let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) };
+
+        // Check if a flashblock is being built
+        if let Some(build_info) = self.flashblock_build_info() {
+            let current_index = rx.borrow().as_ref().map(|b| b.last_flashblock_index);
+
+            // Check if this is the first flashblock or the next consecutive index
+            let is_next_index = current_index.is_none_or(|idx| build_info.index == idx + 1);
+
+            // Wait only for relevant flashblocks: matching parent and next in sequence
+            if build_info.parent_hash == parent_hash && is_next_index {
+                let mut rx_clone = rx.clone();
+                // Wait up to MAX_FLASHBLOCK_WAIT_DURATION for a new flashblock to arrive
+                let _ = time::timeout(MAX_FLASHBLOCK_WAIT_DURATION, rx_clone.changed()).await;
+            }
+        }
+
+        // Fall back to current block
+        Ok(self.extract_matching_block(rx.borrow().as_ref(), parent_hash))
+    }
+
     /// Returns a [`PendingBlock`] that is built out of flashblocks.
     ///
     /// If flashblocks receiver is not set, then it always returns `None`.
-    pub fn pending_flashblock(&self) -> eyre::Result<Option<PendingBlock<N::Primitives>>>
+    ///
+    /// It may wait up to 50ms for a fresh flashblock if one is currently being built.
+    pub async fn pending_flashblock(&self) -> eyre::Result<Option<PendingBlock<N::Primitives>>>
     where
         OpEthApiError: FromEvmError<N::Evm>,
         Rpc: RpcConvert,
     {
@@ -128,21 +180,7 @@ impl OpEthApi {
             PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent,
         };

-        let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) };
-        let pending_block = rx.borrow();
-        let Some(pending_block) = pending_block.as_ref() else { return Ok(None) };
-
-        let now = Instant::now();
-
-        // Is the pending block not expired and latest is its parent?
-        if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) &&
-            parent.hash() == pending_block.block().parent_hash() &&
-            now <= pending_block.expires_at
-        {
-            return Ok(Some(pending_block.pending.clone()));
-        }
-
-        Ok(None)
+        self.flashblock(parent.hash()).await
     }
 }

@@ -252,8 +290,12 @@ where
     }

     async fn suggested_priority_fee(&self) -> Result<U256, Self::Error> {
-        let min_tip = U256::from(self.inner.min_suggested_priority_fee);
-        self.inner.eth_api.gas_oracle().op_suggest_tip_cap(min_tip).await.map_err(Into::into)
+        self.inner
+            .eth_api
+            .gas_oracle()
+            .op_suggest_tip_cap(self.inner.min_suggested_priority_fee)
+            .await
+            .map_err(Into::into)
     }
 }

@@ -330,6 +372,8 @@ pub struct OpEthApiInner {
     ///
     /// If set, then it provides sequences of flashblock built.
flashblock_rx: Option, + /// Receiver that signals when a flashblock is being built + in_progress_rx: Option, } impl fmt::Debug for OpEthApiInner { @@ -465,24 +509,28 @@ where None }; - let rxs = if let Some(ws_url) = flashblocks_url { - info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); - let (tx, pending_block_rx) = watch::channel(None); - let stream = WsFlashBlockStream::new(ws_url); - let service = FlashBlockService::new( - stream, - ctx.components.evm_config().clone(), - ctx.components.provider().clone(), - ctx.components.task_executor().clone(), - ); - let flashblock_rx = service.subscribe_block_sequence(); - ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - Some((pending_block_rx, flashblock_rx)) - } else { - None - }; + let (pending_block_rx, flashblock_rx, in_progress_rx) = + if let Some(ws_url) = flashblocks_url { + info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); + + let (tx, pending_rx) = watch::channel(None); + let stream = WsFlashBlockStream::new(ws_url); + let service = FlashBlockService::new( + stream, + ctx.components.evm_config().clone(), + ctx.components.provider().clone(), + ctx.components.task_executor().clone(), + ); + + let flashblock_rx = service.subscribe_block_sequence(); + let in_progress_rx = service.subscribe_in_progress(); + + ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - let (pending_block_rx, flashblock_rx) = rxs.unzip(); + (Some(pending_rx), Some(flashblock_rx), Some(in_progress_rx)) + } else { + (None, None, None) + }; let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); @@ -492,6 +540,7 @@ where U256::from(min_suggested_priority_fee), pending_block_rx, flashblock_rx, + in_progress_rx, )) } } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 8857b89b021..151668f4039 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -42,7 +42,7 @@ where async fn local_pending_block( &self, ) -> Result>, Self::Error> { - if let Ok(Some(pending)) = self.pending_flashblock() { + if let Ok(Some(pending)) = self.pending_flashblock().await { return Ok(Some(pending.into_block_and_receipts())); } @@ -70,7 +70,7 @@ where where Self: SpawnBlocking, { - let Ok(Some(pending_block)) = self.pending_flashblock() else { + let Ok(Some(pending_block)) = self.pending_flashblock().await else { return Ok(None); }; diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 97fe3a0b5b7..775e79d5aff 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -458,10 +458,11 @@ mod test { OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap(); - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - l1_block_info.operator_fee_constant = Some(U256::from(2)); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::from(2)), + ..Default::default() + }; let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) @@ -481,10 +482,11 @@ mod test { OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice()) .unwrap(); - let mut l1_block_info = op_revm::L1BlockInfo::default(); - - l1_block_info.operator_fee_scalar = Some(U256::ZERO); - 
l1_block_info.operator_fee_constant = Some(U256::ZERO); + let mut l1_block_info = op_revm::L1BlockInfo { + operator_fee_scalar: Some(U256::ZERO), + operator_fee_constant: Some(U256::ZERO), + ..Default::default() + }; let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056) .l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index fb98569db10..aa7e8ea60bd 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -127,7 +127,7 @@ where } } => { // Check flashblocks for faster confirmation (Optimism-specific) - if let Ok(Some(pending_block)) = this.pending_flashblock() { + if let Ok(Some(pending_block)) = this.pending_flashblock().await { let block_and_receipts = pending_block.into_block_and_receipts(); if block_and_receipts.block.body().contains_transaction(&hash) && let Some(receipt) = this.transaction_receipt(hash).await? { @@ -168,7 +168,7 @@ where if tx_receipt.is_none() { // if flashblocks are supported, attempt to find id from the pending block - if let Ok(Some(pending_block)) = this.pending_flashblock() { + if let Ok(Some(pending_block)) = this.pending_flashblock().await { let block_and_receipts = pending_block.into_block_and_receipts(); if let Some((tx, receipt)) = block_and_receipts.find_transaction_and_receipt_by_hash(hash) diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 1c9b5d1c39e..10f8ad5dccd 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -12,6 +12,7 @@ pub mod engine; pub mod error; pub mod eth; pub mod historical; +pub mod metrics; pub mod miner; pub mod sequencer; pub mod witness; @@ -21,4 +22,5 @@ pub use engine::OpEngineApiClient; pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES}; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder}; +pub use metrics::SequencerMetrics; pub use sequencer::SequencerClient; diff --git a/crates/optimism/rpc/src/metrics.rs b/crates/optimism/rpc/src/metrics.rs new file mode 100644 index 00000000000..5aa5e3eff3d --- /dev/null +++ b/crates/optimism/rpc/src/metrics.rs @@ -0,0 +1,21 @@ +//! RPC metrics unique for OP-stack. + +use core::time::Duration; +use metrics::Histogram; +use reth_metrics::Metrics; + +/// Optimism sequencer metrics +#[derive(Metrics, Clone)] +#[metrics(scope = "optimism_rpc.sequencer")] +pub struct SequencerMetrics { + /// How long it takes to forward a transaction to the sequencer + pub(crate) sequencer_forward_latency: Histogram, +} + +impl SequencerMetrics { + /// Records the duration it took to forward a transaction + #[inline] + pub fn record_forward_latency(&self, duration: Duration) { + self.sequencer_forward_latency.record(duration.as_secs_f64()); + } +} diff --git a/crates/optimism/rpc/src/sequencer.rs b/crates/optimism/rpc/src/sequencer.rs index 2e66a30275f..8fc8c1b389d 100644 --- a/crates/optimism/rpc/src/sequencer.rs +++ b/crates/optimism/rpc/src/sequencer.rs @@ -1,12 +1,11 @@ //! Helpers for optimism specific RPC implementations. 
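A small usage sketch of the relocated metrics type; only the latency-recording call is from this diff, the surrounding forwarding plumbing is elided:

use reth_optimism_rpc::SequencerMetrics;
use std::time::Instant;

async fn timed_forward(metrics: &SequencerMetrics) {
    let started = Instant::now();
    // ... forward the raw transaction to the sequencer here ...
    metrics.record_forward_latency(started.elapsed());
}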
-use crate::SequencerClientError; +use crate::{SequencerClientError, SequencerMetrics}; use alloy_json_rpc::{RpcRecv, RpcSend}; use alloy_primitives::{hex, B256}; use alloy_rpc_client::{BuiltInConnectionString, ClientBuilder, RpcClient as Client}; use alloy_rpc_types_eth::erc4337::TransactionConditional; use alloy_transport_http::Http; -use reth_optimism_txpool::supervisor::metrics::SequencerMetrics; use std::{str::FromStr, sync::Arc, time::Instant}; use thiserror::Error; use tracing::warn; diff --git a/crates/optimism/txpool/src/supervisor/metrics.rs b/crates/optimism/txpool/src/supervisor/metrics.rs index 23eec843025..cb51a52bfc5 100644 --- a/crates/optimism/txpool/src/supervisor/metrics.rs +++ b/crates/optimism/txpool/src/supervisor/metrics.rs @@ -1,4 +1,4 @@ -//! Optimism supervisor and sequencer metrics +//! Optimism supervisor metrics use crate::supervisor::InteropTxValidatorError; use op_alloy_rpc_types::SuperchainDAError; @@ -70,19 +70,3 @@ impl SupervisorMetrics { } } } - -/// Optimism sequencer metrics -#[derive(Metrics, Clone)] -#[metrics(scope = "optimism_transaction_pool.sequencer")] -pub struct SequencerMetrics { - /// How long it takes to forward a transaction to the sequencer - pub(crate) sequencer_forward_latency: Histogram, -} - -impl SequencerMetrics { - /// Records the duration it took to forward a transaction - #[inline] - pub fn record_forward_latency(&self, duration: Duration) { - self.sequencer_forward_latency.record(duration.as_secs_f64()); - } -} diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index f9530d003f5..f3f1b03ab2e 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -512,7 +512,7 @@ where f.debug_tuple("BestPayload").field(&f0).field(&f1).finish() } Self::PayloadTimestamp(f0, f1) => { - f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() + f.debug_tuple("PayloadTimestamp").field(&f0).field(&f1).finish() } Self::Resolve(f0, f1, _f2) => f.debug_tuple("Resolve").field(&f0).field(&f1).finish(), Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs index 4cba4b7d52d..da2bbc533aa 100644 --- a/crates/primitives-traits/src/extended.rs +++ b/crates/primitives-traits/src/extended.rs @@ -142,8 +142,8 @@ where impl SignerRecoverable for Extended where - B: SignedTransaction + IsTyped2718, - T: SignedTransaction, + B: SignerRecoverable, + T: SignerRecoverable, { fn recover_signer(&self) -> Result { delegate!(self => tx.recover_signer()) diff --git a/crates/primitives-traits/src/transaction/access_list.rs b/crates/primitives-traits/src/transaction/access_list.rs index 06c033e36b0..e4d5638f562 100644 --- a/crates/primitives-traits/src/transaction/access_list.rs +++ b/crates/primitives-traits/src/transaction/access_list.rs @@ -8,22 +8,11 @@ mod tests { use proptest::proptest; use proptest_arbitrary_interop::arb; use reth_codecs::{add_arbitrary_tests, Compact}; - use serde::{Deserialize, Serialize}; /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// `AccessList` type natively #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodableWrapper, - RlpEncodableWrapper, - Serialize, - Deserialize, - Compact, + Clone, Debug, PartialEq, Eq, Default, RlpDecodableWrapper, RlpEncodableWrapper, Compact, )] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] 
#[add_arbitrary_tests(compact, rlp)] @@ -36,22 +25,9 @@ mod tests { } // This - #[derive( - Clone, - Debug, - PartialEq, - Eq, - Hash, - Default, - RlpDecodable, - RlpEncodable, - Serialize, - Deserialize, - Compact, - )] + #[derive(Clone, Debug, PartialEq, Eq, Default, RlpDecodable, RlpEncodable, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact, rlp)] - #[serde(rename_all = "camelCase")] struct RethAccessListItem { /// Account address that would be loaded at the start of execution address: Address, diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 665dcab9a88..1717cc6ec3f 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-ethereum-primitives = { workspace = true, features = ["serde"] } -reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-primitives-traits.workspace = true reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 5b0a4b1af51..ccab1bbbe99 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -17,7 +17,7 @@ use core::error; use dyn_clone::DynClone; use reth_evm::{ revm::context_interface::{either::Either, Block}, - ConfigureEvm, SpecFor, TxEnvFor, + BlockEnvFor, ConfigureEvm, EvmEnvFor, TxEnvFor, }; use reth_primitives_traits::{ BlockTy, HeaderTy, NodePrimitives, SealedBlock, SealedHeader, SealedHeaderFor, TransactionMeta, @@ -123,19 +123,16 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { /// Associated lower layer consensus types to convert from and into types of [`Self::Network`]. type Primitives: NodePrimitives; + /// The EVM configuration. + type Evm: ConfigureEvm; + /// Associated upper layer JSON-RPC API network requests and responses to convert from and into /// types of [`Self::Primitives`]. type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug; - /// A set of variables for executing a transaction. - type TxEnv; - /// An associated RPC conversion error. type Error: error::Error + Into>; - /// The EVM specification identifier. - type Spec; - /// Wrapper for `fill()` with default `TransactionInfo` /// Create a new rpc transaction result for a _pending_ signed transaction, setting block /// environment related fields to `None`. @@ -169,9 +166,8 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { fn tx_env( &self, request: RpcTxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result; + evm_env: &EvmEnvFor, + ) -> Result, Self::Error>; /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all /// receipts are from the same block. @@ -199,8 +195,8 @@ pub trait RpcConvert: Send + Sync + Unpin + Debug + DynClone + 'static { } dyn_clone::clone_trait_object!( - - RpcConvert + + RpcConvert ); /// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. @@ -439,7 +435,7 @@ impl TryIntoSimTx> for TransactionRequest { /// implementation for free, thanks to the blanket implementation, unless the conversion requires /// more context. For example, some configuration parameters or access handles to database, network, /// etc. -pub trait TxEnvConverter: +pub trait TxEnvConverter: Debug + Send + Sync + Unpin + Clone + 'static { /// An associated error that can occur during conversion. 
@@ -451,31 +447,30 @@ pub trait TxEnvConverter: fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result; + evm_env: &EvmEnvFor, + ) -> Result, Self::Error>; } -impl TxEnvConverter for () +impl TxEnvConverter for () where - TxReq: TryIntoTxEnv, + TxReq: TryIntoTxEnv, BlockEnvFor>, + Evm: ConfigureEvm, { type Error = TxReq::Err; fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result { - tx_req.try_into_tx_env(cfg_env, block_env) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + tx_req.try_into_tx_env(&evm_env.cfg_env, &evm_env.block_env) } } /// Converts rpc transaction requests into transaction environment using a closure. -impl TxEnvConverter for F +impl TxEnvConverter for F where - F: Fn(TxReq, &CfgEnv, &BlockEnv) -> Result + F: Fn(TxReq, &EvmEnvFor) -> Result, E> + Debug + Send + Sync @@ -483,6 +478,7 @@ where + Clone + 'static, TxReq: Clone, + Evm: ConfigureEvm, E: error::Error + Send + Sync + 'static, { type Error = E; @@ -490,17 +486,16 @@ where fn convert_tx_env( &self, tx_req: TxReq, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result { - self(tx_req, cfg_env, block_env) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + self(tx_req, evm_env) } } /// Converts `self` into `T`. /// /// Should create an executable transaction environment using [`TransactionRequest`]. -pub trait TryIntoTxEnv { +pub trait TryIntoTxEnv { /// An associated error that can occur during the conversion. type Err; @@ -836,7 +831,6 @@ impl } /// Converts `self` into a boxed converter. - #[expect(clippy::type_complexity)] pub fn erased( self, ) -> Box< @@ -844,8 +838,7 @@ impl Primitives = ::Primitives, Network = ::Network, Error = ::Error, - TxEnv = ::TxEnv, - Spec = ::Spec, + Evm = ::Evm, >, > where @@ -933,13 +926,12 @@ where SimTx: SimTxConverter, TxTy>, RpcTx: RpcTxConverter, Network::TransactionResponse, >>::Out>, - TxEnv: TxEnvConverter, TxEnvFor, SpecFor>, + TxEnv: TxEnvConverter, Evm>, { type Primitives = N; + type Evm = Evm; type Network = Network; - type TxEnv = TxEnvFor; type Error = Receipt::Error; - type Spec = SpecFor; fn fill( &self, @@ -965,10 +957,9 @@ where fn tx_env( &self, request: RpcTxReq, - cfg_env: &CfgEnv>, - block_env: &BlockEnv, - ) -> Result { - self.tx_env_converter.convert_tx_env(request, cfg_env, block_env).map_err(Into::into) + evm_env: &EvmEnvFor, + ) -> Result, Self::Error> { + self.tx_env_converter.convert_tx_env(request, evm_env).map_err(Into::into) } fn convert_receipts( diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index a2293b46309..3ea12d16a63 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -71,4 +71,4 @@ op = [ "reth-rpc-convert/op", "alloy-evm/op", ] -scroll = ["reth-scroll-evm"] +scroll = ["reth-scroll-evm", "reth-rpc-convert/scroll"] diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index b96dab882a0..8f325e757f1 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -20,8 +20,8 @@ use alloy_rpc_types_eth::{ use futures::Future; use reth_errors::{ProviderError, RethError}; use reth_evm::{ - ConfigureEvm, Evm, EvmEnv, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TransactionEnv, - TxEnvFor, + env::BlockEnvironment, ConfigureEvm, Evm, EvmEnvFor, HaltReasonFor, InspectorFor, + TransactionEnv, TxEnvFor, }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; @@ -38,6 +38,7 
@@ use reth_rpc_eth_types::{ }; use reth_storage_api::{BlockIdReader, ProviderTx}; use revm::{ + context::Block, context_interface::{ result::{ExecutionResult, ResultAndState}, Transaction, @@ -115,7 +116,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA evm_env.cfg_env.disable_nonce_check = true; evm_env.cfg_env.disable_base_fee = true; evm_env.cfg_env.tx_gas_limit_cap = Some(u64::MAX); - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; } let SimBlock { block_overrides, state_overrides, calls } = block; @@ -123,19 +124,23 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA if let Some(block_overrides) = block_overrides { // ensure we don't allow uncapped gas limit per block if let Some(gas_limit_override) = block_overrides.gas_limit && - gas_limit_override > evm_env.block_env.gas_limit && + gas_limit_override > evm_env.block_env.gas_limit() && gas_limit_override > this.call_gas_limit() { return Err(EthApiError::other(EthSimulateError::GasLimitReached).into()) } - apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides( + block_overrides, + &mut db, + evm_env.block_env.inner_mut(), + ); } if let Some(state_overrides) = state_overrides { apply_state_overrides(state_overrides, &mut db) .map_err(Self::Error::from_eth_err)?; } - let block_gas_limit = evm_env.block_env.gas_limit; + let block_gas_limit = evm_env.block_env.gas_limit(); let chain_id = evm_env.cfg_env.chain_id; let default_gas_limit = { @@ -404,7 +409,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let cap = this.caller_gas_allowance(&mut db, &evm_env, &tx_env)?; // no gas limit was provided in the request, so we need to cap the request's gas // limit - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); } // can consume the list since we're not using the request anymore @@ -461,7 +466,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Executes code on state. pub trait Call: LoadState< - RpcConvert: RpcConvert, Spec = SpecFor>, + RpcConvert: RpcConvert, Error: FromEvmError + From<::Error> + From, @@ -520,7 +525,7 @@ pub trait Call: Ok(res) } - /// Executes the [`EvmEnv`] against the given [Database] without committing state + /// Executes the [`reth_evm::EvmEnv`] against the given [Database] without committing state /// changes. fn transact_with_inspector( &self, @@ -574,7 +579,7 @@ pub trait Call: /// Prepares the state and env for the given [`RpcTxReq`] at the given [`BlockId`] and /// executes the closure on a new task returning the result of the closure. /// - /// This returns the configured [`EvmEnv`] for the given [`RpcTxReq`] at + /// This returns the configured [`reth_evm::EvmEnv`] for the given [`RpcTxReq`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. /// /// This is primarily used by `eth_call`. @@ -712,10 +717,10 @@ pub trait Call: /// /// All `TxEnv` fields are derived from the given [`RpcTxReq`], if fields are - /// `None`, they fall back to the [`EvmEnv`]'s settings. + /// `None`, they fall back to the [`reth_evm::EvmEnv`]'s settings. 
fn create_txn_env( &self, - evm_env: &EvmEnv>, + evm_env: &EvmEnvFor, mut request: RpcTxReq<::Network>, mut db: impl Database>, ) -> Result, Self::Error> { @@ -728,10 +733,10 @@ pub trait Call: request.as_mut().set_nonce(nonce); } - Ok(self.tx_resp_builder().tx_env(request, &evm_env.cfg_env, &evm_env.block_env)?) + Ok(self.tx_resp_builder().tx_env(request, evm_env)?) } - /// Prepares the [`EvmEnv`] for execution of calls. + /// Prepares the [`reth_evm::EvmEnv`] for execution of calls. /// /// Does not commit any changes to the underlying database. /// @@ -790,7 +795,7 @@ pub trait Call: request.as_mut().take_nonce(); if let Some(block_overrides) = overrides.block { - apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); + apply_block_overrides(*block_overrides, db, evm_env.block_env.inner_mut()); } if let Some(state_overrides) = overrides.state { apply_state_overrides(state_overrides, db) @@ -801,7 +806,7 @@ pub trait Call: // lower the basefee to 0 to avoid breaking EVM invariants (basefee < gasprice): if tx_env.gas_price() == 0 { - evm_env.block_env.basefee = 0; + evm_env.block_env.inner_mut().basefee = 0; } if !request_has_gas_limit { @@ -811,7 +816,7 @@ pub trait Call: trace!(target: "rpc::eth::call", ?tx_env, "Applying gas limit cap with caller allowance"); let cap = self.caller_gas_allowance(db, &evm_env, &tx_env)?; // ensure we cap gas_limit to the block's - tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); + tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit())); } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index cca674e9739..cd2518345ce 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -18,7 +18,10 @@ use reth_rpc_eth_types::{ }; use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; use reth_storage_api::StateProvider; -use revm::context_interface::{result::ExecutionResult, Transaction}; +use revm::{ + context::Block, + context_interface::{result::ExecutionResult, Transaction}, +}; use tracing::trace; /// Gas execution estimates @@ -60,10 +63,10 @@ pub trait EstimateCall: Call { let tx_request_gas_limit = request.as_ref().gas_limit(); let tx_request_gas_price = request.as_ref().gas_price(); // the gas limit of the corresponding block - let max_gas_limit = evm_env - .cfg_env - .tx_gas_limit_cap - .map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit)); + let max_gas_limit = evm_env.cfg_env.tx_gas_limit_cap.map_or_else( + || evm_env.block_env.gas_limit(), + |cap| cap.min(evm_env.block_env.gas_limit()), + ); // Determine the highest possible gas limit, considering both the request's specified limit // and the block's limit. 
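Since converters now receive the whole `EvmEnvFor<Evm>` instead of separate `CfgEnv`/`BlockEnv` references, closure-based converters can be wrapped or composed generically. A rough sketch under the signatures shown above; the wrapper itself is hypothetical and not part of this diff:

use reth_evm::{ConfigureEvm, EvmEnvFor, TxEnvFor};

/// Wraps an existing converter closure; any such closure satisfies the
/// reworked `TxEnvConverter` blanket impl for `Fn(Req, &EvmEnvFor<Evm>)`.
fn wrap_converter<Req, Evm, E, F>(
    inner: F,
) -> impl Fn(Req, &EvmEnvFor<Evm>) -> Result<TxEnvFor<Evm>, E> + Clone
where
    Evm: ConfigureEvm,
    F: Fn(Req, &EvmEnvFor<Evm>) -> Result<TxEnvFor<Evm>, E> + Clone,
{
    move |req, evm_env| {
        // A real converter could consult `evm_env.cfg_env` and
        // `evm_env.block_env` here before building the tx env.
        inner(req, evm_env)
    }
}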
diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 25ac5806647..cbbb6d6c679 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -13,7 +13,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome, ExecutionOutcome}, - ConfigureEvm, Evm, NextBlockEnvAttributes, SpecFor, + ConfigureEvm, Evm, NextBlockEnvAttributes, }; use reth_primitives_traits::{transaction::error::InvalidTransactionError, HeaderTy, SealedHeader}; use reth_revm::{database::StateProviderDatabase, db::State}; @@ -23,8 +23,8 @@ use reth_rpc_eth_types::{ PendingBlockEnv, PendingBlockEnvOrigin, }; use reth_storage_api::{ - noop::NoopProvider, BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, - ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderBox, StateProviderFactory, + noop::NoopProvider, BlockReader, BlockReaderIdExt, ProviderHeader, ProviderTx, ReceiptProvider, + StateProviderBox, StateProviderFactory, }; use reth_transaction_pool::{ error::InvalidPoolTransactionError, BestTransactions, BestTransactionsAttributes, @@ -61,17 +61,7 @@ pub trait LoadPendingBlock: /// Configures the [`PendingBlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block - #[expect(clippy::type_complexity)] - fn pending_block_env_and_cfg( - &self, - ) -> Result< - PendingBlockEnv< - ProviderBlock, - ProviderReceipt, - SpecFor, - >, - Self::Error, - > { + fn pending_block_env_and_cfg(&self) -> Result, Self::Error> { if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? && let Some(receipts) = self .provider() @@ -166,7 +156,7 @@ pub trait LoadPendingBlock: // Is the pending block cached? if let Some(pending_block) = lock.as_ref() { // Is the cached block not expired and latest is its parent? 
- if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) && + if pending.evm_env.block_env.number() == U256::from(pending_block.block().number()) && parent.hash() == pending_block.block().parent_hash() && now <= pending_block.expires_at { @@ -265,14 +255,14 @@ pub trait LoadPendingBlock: .unwrap_or_else(BlobParams::cancun); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; - let block_gas_limit: u64 = block_env.gas_limit; + let block_gas_limit: u64 = block_env.gas_limit(); // Only include transactions if not configured as Empty if !self.pending_block_kind().is_empty() { let mut best_txs = self .pool() .best_transactions_with_attributes(BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|gasprice| gasprice as u64), )) // freeze to get a block as fast as possible diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index a3c79416cfe..86039e38082 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -19,14 +19,14 @@ use reth_rpc_eth_types::{ EthApiError, }; use reth_storage_api::{ProviderBlock, ProviderTx}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit}; +use revm::{context::Block, context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc; /// Executes CPU heavy tasks. pub trait Trace: LoadState> { - /// Executes the [`TxEnvFor`] with [`EvmEnvFor`] against the given [Database] without committing - /// state changes. + /// Executes the [`TxEnvFor`] with [`reth_evm::EvmEnv`] against the given [Database] without + /// committing state changes. fn inspect( &self, db: DB, @@ -301,8 +301,8 @@ pub trait Trace: LoadState> { let state_at = block.parent_hash(); let block_hash = block.hash(); - let block_number = evm_env.block_env.number.saturating_to(); - let base_fee = evm_env.block_env.basefee; + let block_number = evm_env.block_env.number().saturating_to(); + let base_fee = evm_env.block_env.basefee(); // now get the state let state = this.state_at_block_id(state_at.into()).await?; diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 1f3ee7dd6dd..196461d18ce 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -681,7 +681,7 @@ impl RpcInvalidTransactionError { /// Converts the halt error /// /// Takes the configured gas limit of the transaction which is attached to the error - pub const fn halt(reason: HaltReason, gas_limit: u64) -> Self { + pub fn halt(reason: HaltReason, gas_limit: u64) -> Self { match reason { HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit), HaltReason::NonceOverflow => Self::NonceMaxValue, @@ -762,7 +762,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::BlobVersionedHashesNotSupported => { Self::BlobVersionedHashesNotSupported } - InvalidTransaction::BlobGasPriceGreaterThanMax => Self::BlobFeeCapTooLow, + InvalidTransaction::BlobGasPriceGreaterThanMax { .. } => Self::BlobFeeCapTooLow, InvalidTransaction::EmptyBlobs => Self::BlobTransactionMissingBlobHashes, InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, InvalidTransaction::TooManyBlobs { have, .. 
} => Self::TooManyBlobs { have }, @@ -780,6 +780,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::Eip7873MissingTarget => { Self::other(internal_rpc_err(err.to_string())) } + InvalidTransaction::Str(_) => Self::other(internal_rpc_err(err.to_string())), } } } diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 05ad6fb4e27..d0b5c65c1ed 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -13,18 +13,18 @@ use reth_chain_state::{ BlockState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, }; use reth_ethereum_primitives::Receipt; -use reth_evm::EvmEnv; +use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_primitives_traits::{ Block, BlockTy, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, }; -/// Configured [`EvmEnv`] for a pending block. +/// Configured [`reth_evm::EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] -pub struct PendingBlockEnv { - /// Configured [`EvmEnv`] for the pending block. - pub evm_env: EvmEnv, +pub struct PendingBlockEnv { + /// Configured [`reth_evm::EvmEnv`] for the pending block. + pub evm_env: EvmEnvFor, /// Origin block for the config - pub origin: PendingBlockEnvOrigin, + pub origin: PendingBlockEnvOrigin, ReceiptTy>, } /// The origin for a configured [`PendingBlockEnv`] diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 5492e127b77..ec63443da3d 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -24,6 +24,7 @@ use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq}; use reth_rpc_server_types::result::rpc_err; use reth_storage_api::noop::NoopProvider; use revm::{ + context::Block, context_interface::result::ExecutionResult, primitives::{Address, Bytes, TxKind}, Database, @@ -88,7 +89,7 @@ where let tx = resolve_transaction( call, default_gas_limit, - builder.evm().block().basefee, + builder.evm().block().basefee(), chain_id, builder.evm_mut().db_mut(), tx_resp_builder, diff --git a/crates/rpc/rpc/src/aliases.rs b/crates/rpc/rpc/src/aliases.rs index 4e317305ca4..8854f1b607d 100644 --- a/crates/rpc/rpc/src/aliases.rs +++ b/crates/rpc/rpc/src/aliases.rs @@ -1,4 +1,4 @@ -use reth_evm::{ConfigureEvm, SpecFor, TxEnvFor}; +use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_types::EthApiError; @@ -8,7 +8,6 @@ pub type DynRpcConverter = Box< Primitives = ::Primitives, Network = Network, Error = Error, - TxEnv = TxEnvFor, - Spec = SpecFor, + Evm = Evm, >, >; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index bca7a85c9dc..ccf753db3f4 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -3,6 +3,7 @@ use alloy_consensus::{ BlockHeader, }; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; +use alloy_evm::env::BlockEnvironment; use alloy_genesis::ChainConfig; use alloy_primitives::{uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; @@ -40,7 +41,7 @@ use reth_storage_api::{ }; use reth_tasks::pool::BlockingTaskGuard; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; -use revm::{context_interface::Transaction, state::EvmState, DatabaseCommit}; +use revm::{context::Block, context_interface::Transaction, state::EvmState, DatabaseCommit}; use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; @@ -372,8 
+373,8 @@ where let db = db.0; let tx_info = TransactionInfo { - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), hash: None, block_hash: None, index: None, @@ -589,8 +590,8 @@ where results.push(trace); } // Increment block_env number and timestamp for the next bundle - evm_env.block_env.number += uint!(1_U256); - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().number += uint!(1_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256); all_bundles.push(results); } @@ -748,8 +749,8 @@ where .map(|c| c.tx_index.map(|i| i as u64)) .unwrap_or_default(), block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), - block_number: Some(evm_env.block_env.number.saturating_to()), - base_fee: Some(evm_env.block_env.basefee), + block_number: Some(evm_env.block_env.number().saturating_to()), + base_fee: Some(evm_env.block_env.basefee()), }; if let Some(tracer) = tracer { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 0303c78e4be..b541f232473 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -2,12 +2,12 @@ use alloy_consensus::{transaction::TxHashRef, EnvKzgSettings, Transaction as _}; use alloy_eips::eip7840::BlobParams; +use alloy_evm::env::BlockEnvironment; use alloy_primitives::{uint, Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ConfigureEvm, Evm}; - use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, @@ -18,7 +18,9 @@ use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{ EthBlobTransactionSidecar, EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, }; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::sync::Arc; /// `Eth` bundle implementation. 
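The pattern these hunks converge on: read block context through revm's `Block` accessors, and mutate through `inner_mut()`, which (as used here) hands back the concrete revm `BlockEnv`. A minimal sketch assuming exactly those trait shapes from `alloy_evm`:

use alloy_evm::env::BlockEnvironment;
use revm::context::Block;

// Assumption: `BlockEnvironment::inner_mut()` yields a `&mut` to the concrete
// revm `BlockEnv` with public fields, matching the `eth_simulate`/bundle code.
fn zero_basefee<B: BlockEnvironment + Block>(block_env: &mut B) {
    if block_env.basefee() > 0 {
        // Writes go through the inner env, as in the hunks above.
        block_env.inner_mut().basefee = 0;
    }
}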
@@ -89,18 +91,18 @@ where let (mut evm_env, at) = self.eth_api().evm_env_at(block_id).await?; if let Some(coinbase) = coinbase { - evm_env.block_env.beneficiary = coinbase; + evm_env.block_env.inner_mut().beneficiary = coinbase; } // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { - evm_env.block_env.timestamp = U256::from(timestamp); + evm_env.block_env.inner_mut().timestamp = U256::from(timestamp); } else { - evm_env.block_env.timestamp += uint!(12_U256); + evm_env.block_env.inner_mut().timestamp += uint!(12_U256); } if let Some(difficulty) = difficulty { - evm_env.block_env.difficulty = U256::from(difficulty); + evm_env.block_env.inner_mut().difficulty = U256::from(difficulty); } // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob @@ -111,7 +113,7 @@ where .eth_api() .provider() .chain_spec() - .blob_params_at_timestamp(evm_env.block_env.timestamp.saturating_to()) + .blob_params_at_timestamp(evm_env.block_env.timestamp().saturating_to()) .unwrap_or_else(BlobParams::cancun); if transactions.iter().filter_map(|tx| tx.blob_gas_used()).sum::() > blob_params.max_blob_gas_per_block() @@ -125,30 +127,30 @@ where } // default to call gas limit unless user requests a smaller limit - evm_env.block_env.gas_limit = self.inner.eth_api.call_gas_limit(); + evm_env.block_env.inner_mut().gas_limit = self.inner.eth_api.call_gas_limit(); if let Some(gas_limit) = gas_limit { - if gas_limit > evm_env.block_env.gas_limit { + if gas_limit > evm_env.block_env.gas_limit() { return Err( EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() ) } - evm_env.block_env.gas_limit = gas_limit; + evm_env.block_env.inner_mut().gas_limit = gas_limit; } if let Some(base_fee) = base_fee { - evm_env.block_env.basefee = base_fee.try_into().unwrap_or(u64::MAX); + evm_env.block_env.inner_mut().basefee = base_fee.try_into().unwrap_or(u64::MAX); } - let state_block_number = evm_env.block_env.number; + let state_block_number = evm_env.block_env.number(); // use the block number of the request - evm_env.block_env.number = U256::from(block_number); + evm_env.block_env.inner_mut().number = U256::from(block_number); let eth_api = self.eth_api().clone(); self.eth_api() .spawn_with_state_at_block(at, move |state| { - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); let db = CacheDB::new(StateProviderDatabase::new(state)); let initial_coinbase = db diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index a76e146042d..abe06cb55ec 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,7 +1,6 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. 
use crate::EthApi; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, @@ -13,12 +12,7 @@ impl EthCall for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -26,12 +20,7 @@ impl Call for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -48,11 +37,6 @@ impl EstimateCall for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = EthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 8c7d382c173..fb7f69b61f5 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -2,7 +2,7 @@ use alloy_consensus::{transaction::TxHashRef, BlockHeader}; use alloy_eips::BlockNumberOrTag; -use alloy_evm::overrides::apply_block_overrides; +use alloy_evm::{env::BlockEnvironment, overrides::apply_block_overrides}; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ @@ -22,7 +22,9 @@ use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_storage_api::ProviderTx; use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; -use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef}; +use revm::{ + context::Block, context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef, +}; use std::{sync::Arc, time::Duration}; use tracing::trace; @@ -243,12 +245,12 @@ where .spawn_with_state_at_block(current_block_id, move |state| { // Setup environment let current_block_number = current_block.number(); - let coinbase = evm_env.block_env.beneficiary; - let basefee = evm_env.block_env.basefee; + let coinbase = evm_env.block_env.beneficiary(); + let basefee = evm_env.block_env.basefee(); let mut db = CacheDB::new(StateProviderDatabase::new(state)); // apply overrides - apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); + apply_block_overrides(block_overrides, &mut db, evm_env.block_env.inner_mut()); let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) .map_err(EthApiError::from_eth_err)? diff --git a/crates/scroll/alloy/evm/src/block/mod.rs b/crates/scroll/alloy/evm/src/block/mod.rs index 6b221ff1506..610f67122ea 100644 --- a/crates/scroll/alloy/evm/src/block/mod.rs +++ b/crates/scroll/alloy/evm/src/block/mod.rs @@ -28,7 +28,7 @@ use alloy_primitives::{B256, U256}; use revm::{ context::{ result::{InvalidTransaction, ResultAndState}, - TxEnv, + Block, TxEnv, }, database::State, handler::PrecompileProvider, @@ -154,7 +154,7 @@ where fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> { // set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - self.spec.is_spurious_dragon_active_at_block(self.evm.block().number.to()); + self.spec.is_spurious_dragon_active_at_block(self.evm.block().number().to()); self.evm.db_mut().set_state_clear_flag(state_clear_flag); // load the l1 gas oracle contract in cache. 
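A condensed sketch of the fork-gating reads above, assuming only the `Block` accessors this diff switches to; the free function is illustrative, not from the diff:

use revm::context::Block;
use scroll_alloy_hardforks::ScrollHardforks;

fn fork_checks(spec: &impl ScrollHardforks, block: &impl Block) -> (bool, bool) {
    // `number()`/`timestamp()` now return `U256`, hence the `.to()` casts to `u64`.
    let curie = spec.is_curie_active_at_block(block.number().to());
    let euclid_v2 = spec.is_euclid_v2_active_at_timestamp(block.timestamp().to());
    (curie, euclid_v2)
}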
@@ -169,7 +169,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Curie) - .transitions_at_block(self.evm.block().number.to()) + .transitions_at_block(self.evm.block().number().to()) { if let Err(err) = apply_curie_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -183,7 +183,7 @@ where if self .spec .scroll_fork_activation(ScrollHardfork::Feynman) - .active_at_timestamp(self.evm.block().timestamp.to()) + .active_at_timestamp(self.evm.block().timestamp().to()) { if let Err(err) = apply_feynman_hard_fork(self.evm.db_mut()) { return Err(BlockExecutionError::msg(format!( @@ -206,7 +206,7 @@ where let is_l1_message = tx.tx().ty() == L1_MESSAGE_TRANSACTION_TYPE; // The sum of the transaction’s gas limit and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. - let block_available_gas = self.evm.block().gas_limit - self.gas_used; + let block_available_gas = self.evm.block().gas_limit() - self.gas_used; if tx.tx().gas_limit() > block_available_gas { return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { transaction_gas_limit: tx.tx().gas_limit(), @@ -219,14 +219,14 @@ where let block = self.evm.block(); // verify the transaction type is accepted by the current fork. - if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number.to()) { + if tx.tx().is_eip2930() && !chain_spec.is_curie_active_at_block(block.number().to()) { return Err(BlockValidationError::InvalidTx { hash, error: Box::new(InvalidTransaction::Eip2930NotSupported), } .into()) } - if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number.to()) { + if tx.tx().is_eip1559() && !chain_spec.is_curie_active_at_block(block.number().to()) { return Err(BlockValidationError::InvalidTx { hash, error: Box::new(InvalidTransaction::Eip1559NotSupported), @@ -241,7 +241,7 @@ where .into()) } if tx.tx().is_eip7702() && - !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp.to()) + !chain_spec.is_euclid_v2_active_at_timestamp(block.timestamp().to()) { return Err(BlockValidationError::InvalidTx { hash, @@ -296,6 +296,7 @@ where receipts: self.receipts, requests: Default::default(), gas_used: self.gas_used, + blob_gas_used: 0, }, )) } diff --git a/crates/scroll/alloy/evm/src/lib.rs b/crates/scroll/alloy/evm/src/lib.rs index f8a7fda6db6..cf0c3f8837b 100644 --- a/crates/scroll/alloy/evm/src/lib.rs +++ b/crates/scroll/alloy/evm/src/lib.rs @@ -112,10 +112,11 @@ where type Error = EVMError; type HaltReason = HaltReason; type Spec = ScrollSpecId; + type BlockEnv = BlockEnv; type Precompiles = P; type Inspector = I; - fn block(&self) -> &BlockEnv { + fn block(&self) -> &Self::BlockEnv { &self.block } @@ -207,6 +208,7 @@ impl EvmFactory for ScrollEvmFactory

 {
     type Error = EVMError;
     type HaltReason = HaltReason;
     type Spec = ScrollSpecId;
+    type BlockEnv = BlockEnv;
     type Precompiles = PrecompilesMap;
 
     fn create_evm(
diff --git a/crates/scroll/alloy/evm/src/system_caller.rs b/crates/scroll/alloy/evm/src/system_caller.rs
index f57d3473b72..12f87c4d4a0 100644
--- a/crates/scroll/alloy/evm/src/system_caller.rs
+++ b/crates/scroll/alloy/evm/src/system_caller.rs
@@ -6,7 +6,10 @@ use alloy_evm::{
     Evm,
 };
 use alloy_primitives::B256;
-use revm::{context::result::ResultAndState, DatabaseCommit};
+use revm::{
+    context::{result::ResultAndState, Block},
+    DatabaseCommit,
+};
 use scroll_alloy_hardforks::ScrollHardforks;
 
 /// An ephemeral helper type for executing system calls.
@@ -62,13 +65,13 @@ fn transact_blockhashes_contract_call(
     evm: &mut impl Evm,
 ) -> Result>, BlockExecutionError> {
     // if Feynman is not active at timestamp then no system transaction occurs.
-    if !spec.is_feynman_active_at_timestamp(evm.block().timestamp.to()) {
+    if !spec.is_feynman_active_at_timestamp(evm.block().timestamp().to()) {
         return Ok(None);
     }
 
     // if the block number is zero (genesis block) then no system transaction may occur as per
     // EIP-2935
-    if evm.block().number.to::<u64>() == 0u64 {
+    if evm.block().number().to::<u64>() == 0u64 {
         return Ok(None);
     }
 
diff --git a/crates/scroll/evm/src/build.rs b/crates/scroll/evm/src/build.rs
index 2645dad697b..ad39ff5a4ec 100644
--- a/crates/scroll/evm/src/build.rs
+++ b/crates/scroll/evm/src/build.rs
@@ -7,6 +7,7 @@ use reth_evm::execute::{BlockAssembler, BlockAssemblerInput};
 use reth_execution_types::BlockExecutionResult;
 use reth_primitives_traits::SignedTransaction;
 use reth_scroll_primitives::ScrollReceipt;
+use revm::context::Block;
 use scroll_alloy_evm::ScrollBlockExecutionCtx;
 use scroll_alloy_hardforks::ScrollHardforks;
 
@@ -53,7 +54,7 @@ where
             ..
         } = input;
 
-        let timestamp = evm_env.block_env.timestamp;
+        let timestamp = evm_env.block_env.timestamp();
 
         let transactions_root = proofs::calculate_transaction_root(&transactions);
         let receipts_root = ScrollReceipt::calculate_receipt_root_no_memo(receipts);
@@ -69,15 +70,15 @@
             withdrawals_root: None,
             logs_bloom,
             timestamp: timestamp.to(),
-            mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(),
+            mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(),
             nonce: BEACON_NONCE.into(),
             base_fee_per_gas: self
                 .chain_spec
-                .is_curie_active_at_block(evm_env.block_env.number.to())
-                .then_some(evm_env.block_env.basefee),
-            number: evm_env.block_env.number.to(),
-            gas_limit: evm_env.block_env.gas_limit,
-            difficulty: evm_env.block_env.difficulty,
+                .is_curie_active_at_block(evm_env.block_env.number().to())
+                .then_some(evm_env.block_env.basefee()),
+            number: evm_env.block_env.number().to(),
+            gas_limit: evm_env.block_env.gas_limit(),
+            difficulty: evm_env.block_env.difficulty(),
             gas_used: *gas_used,
             extra_data: Default::default(),
             parent_beacon_block_root: None,
diff --git a/crates/scroll/node/Cargo.toml b/crates/scroll/node/Cargo.toml
index 319a5da9a34..3ee16294467 100644
--- a/crates/scroll/node/Cargo.toml
+++ b/crates/scroll/node/Cargo.toml
@@ -28,6 +28,7 @@ reth-payload-builder.workspace = true
 reth-primitives = { workspace = true, features = ["c-kzg"] }
 reth-primitives-traits.workspace = true
 reth-provider.workspace = true
+reth-revm.workspace = true
 reth-rpc-eth-types.workspace = true
 reth-rpc-server-types = { workspace = true, optional = true }
 reth-tasks = { workspace = true, optional = true }
@@ -60,6 +61,7 @@ scroll-alloy-consensus.workspace = true
 scroll-alloy-evm.workspace = true
 scroll-alloy-hardforks.workspace = true
 scroll-alloy-network.workspace = true
+scroll-alloy-rpc-types.workspace = true
 scroll-alloy-rpc-types-engine.workspace = true
 
 # misc
diff --git a/crates/scroll/node/src/addons.rs b/crates/scroll/node/src/addons.rs
index 9b2bce66377..a30d24d45b1 100644
--- a/crates/scroll/node/src/addons.rs
+++ b/crates/scroll/node/src/addons.rs
@@ -12,6 +12,7 @@ use reth_node_builder::{
     FullNodeComponents,
 };
 use reth_node_types::NodeTypes;
+use reth_revm::context::BlockEnv;
 use reth_rpc_eth_types::error::FromEvmError;
 use reth_scroll_chainspec::ScrollChainSpec;
 use reth_scroll_engine_primitives::ScrollEngineTypes;
@@ -98,7 +99,7 @@ where
         >,
     >,
     ScrollEthApiError: FromEvmError,
-    EvmFactoryFor: EvmFactory>,
+    EvmFactoryFor: EvmFactory, BlockEnv = BlockEnv>,
     RpcMiddleware: RethRpcMiddleware,
 {
     type Handle = RpcHandle>::EthApi>;
@@ -124,7 +125,7 @@
         >,
     >,
     ScrollEthApiError: FromEvmError,
-    EvmFactoryFor: EvmFactory>,
+    EvmFactoryFor: EvmFactory, BlockEnv = BlockEnv>,
     RpcMiddleware: RethRpcMiddleware,
 {
     type EthApi = >::EthApi;
diff --git a/crates/scroll/openvm-compat/Cargo.lock b/crates/scroll/openvm-compat/Cargo.lock
index 3bf6f91d724..ed661f8cf46 100644
--- a/crates/scroll/openvm-compat/Cargo.lock
+++ b/crates/scroll/openvm-compat/Cargo.lock
@@ -22,9 +22,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
 
 [[package]]
 name = "alloy-chains"
-version = "0.2.5"
+version = "0.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff"
+checksum = "bfaa9ea039a6f9304b4a593d780b1f23e1ae183acdee938b11b38795acacc9f1"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -35,9 +35,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-consensus"
-version = "1.0.37"
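
The addons hunks above pin the factory's new `BlockEnv` associated type to the concrete `reth_revm::context::BlockEnv`. A minimal sketch of why that bound is useful, assuming the `alloy_evm::Evm` trait with the `BlockEnv` associated type introduced in this diff; `current_basefee` is illustrative only, not part of the change:

fn current_basefee<E>(evm: &E) -> u64
where
    E: alloy_evm::Evm<BlockEnv = revm::context::BlockEnv>,
{
    use revm::context::Block;
    // `block()` now returns `&E::BlockEnv`; pinning the associated type to the
    // concrete `BlockEnv` makes the `Block` accessors available to callers.
    evm.block().basefee()
}
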
+version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59094911f05dbff1cf5b29046a00ef26452eccc8d47136d50a47c0cf22f00c85" +checksum = "ad704069c12f68d0c742d0cad7e0a03882b42767350584627fbf8a47b1bf1846" dependencies = [ "alloy-eips", "alloy-primitives", @@ -46,6 +46,7 @@ dependencies = [ "alloy-trie", "alloy-tx-macros", "auto_impl", + "borsh", "c-kzg", "derive_more", "either", @@ -61,9 +62,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903cb8f728107ca27c816546f15be38c688df3c381d7bd1a4a9f215effc1ddb4" +checksum = "bc374f640a5062224d7708402728e3d6879a514ba10f377da62e7dfb14c673e6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -88,23 +89,25 @@ dependencies = [ [[package]] name = "alloy-eip2930" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b82752a889170df67bbb36d42ca63c531eb16274f0d7299ae2a680facba17bd" +checksum = "9441120fa82df73e8959ae0e4ab8ade03de2aaae61be313fbf5746277847ce25" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", "serde", ] [[package]] name = "alloy-eip7702" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d4769c6ffddca380b0070d71c8b7f30bed375543fe76bb2f74ec0acf4b7cd16" +checksum = "2919c5a56a1007492da313e7a3b6d45ef5edc5d33416fdec63c0d7a2702a0d20" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", "k256", "serde", "serde_with", @@ -113,9 +116,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7f1c9a1ccc7f3e03c36976455751a6166a4f0d2d2c530c3f87dfe7d0cdc836" +checksum = "7e867b5fd52ed0372a95016f3a37cbff95a9d5409230fbaef2d8ea00e8618098" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -124,6 +127,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "auto_impl", + "borsh", "c-kzg", "derive_more", "either", @@ -135,9 +139,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.21.2" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a5f67ee74999aa4fe576a83be1996bdf74a30fce3d248bf2007d6fc7dae8aa" +checksum = "08e9e656d58027542447c1ca5aa4ca96293f09e6920c4651953b7451a7c35e4e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -152,23 +156,24 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1421f6c9d15e5b86afbfe5865ca84dea3b9f77173a0963c1a2ee4e626320ada9" +checksum = "b90be17e9760a6ba6d13cebdb049cea405ebc8bf57d90664ed708cc5bc348342" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-serde", "alloy-trie", + "borsh", "serde", "serde_with", ] [[package]] name = "alloy-hardforks" -version = "0.3.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "889eb3949b58368a09d4f16931c660275ef5fb08e5fbd4a96573b19c7085c41f" +checksum = "1e29d7eacf42f89c21d7f089916d0bdb4f36139a31698790e8837d2dbbd4b2c3" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -179,9 +184,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46e9374c667c95c41177602ebe6f6a2edd455193844f011d973d374b65501b38" 
+checksum = "0d3ae2777e900a7a47ad9e3b8ab58eff3d93628265e73bbdee09acf90bf68f75" dependencies = [ "alloy-consensus", "alloy-eips", @@ -192,9 +197,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.3.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc9485c56de23438127a731a6b4c87803d49faf1a7068dcd1d8768aca3a9edb9" +checksum = "355bf68a433e0fd7f7d33d5a9fc2583fde70bf5c530f63b80845f8da5505cf28" dependencies = [ "alloy-rlp", "bytes", @@ -202,14 +207,14 @@ dependencies = [ "const-hex", "derive_more", "foldhash", - "hashbrown 0.15.4", - "indexmap 2.10.0", + "hashbrown 0.16.0", + "indexmap 2.12.0", "itoa", "k256", "keccak-asm", "paste", "proptest", - "rand 0.9.1", + "rand 0.9.2", "ruint", "rustc-hash", "serde", @@ -236,14 +241,14 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "alloy-rpc-types-engine" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222ecadcea6aac65e75e32b6735635ee98517aa63b111849ee01ae988a71d685" +checksum = "9981491bb98e76099983f516ec7de550db0597031f5828c994961eb4bb993cce" dependencies = [ "alloy-consensus", "alloy-eips", @@ -255,9 +260,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db46b0901ee16bbb68d986003c66dcb74a12f9d9b3c44f8e85d51974f2458f0f" +checksum = "29031a6bf46177d65efce661f7ab37829ca09dd341bc40afb5194e97600655cc" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -276,9 +281,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5413814be7a22fbc81e0f04a2401fcc3eb25e56fd53b04683e8acecc6e1fe01b" +checksum = "01e856112bfa0d9adc85bd7c13db03fad0e71d1d6fb4c2010e475b6718108236" dependencies = [ "alloy-primitives", "serde", @@ -287,41 +292,41 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedac07a10d4c2027817a43cc1f038313fc53c7ac866f7363239971fd01f9f18" +checksum = "f3ce480400051b5217f19d6e9a82d9010cdde20f1ae9c00d53591e4a1afbb312" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "alloy-sol-macro-expander" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f9a598f010f048d8b8226492b6401104f5a5c1273c2869b72af29b48bb4ba9" +checksum = "6d792e205ed3b72f795a8044c52877d2e6b6e9b1d13f431478121d8d4eaa9028" dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.10.0", + "indexmap 2.12.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f494adf9d60e49aa6ce26dfd42c7417aa6d4343cf2ae621f20e4d92a5ad07d85" +checksum = "0bd1247a8f90b465ef3f1207627547ec16940c35597875cdc09c49d58b19693c" dependencies = [ "const-hex", "dunce", @@ -329,15 +334,15 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.104", + "syn 
2.0.110", "syn-solidity", ] [[package]] name = "alloy-sol-types" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a285b46e3e0c177887028278f04cc8262b76fd3b8e0e20e93cea0a58c35f5ac5" +checksum = "70319350969a3af119da6fb3e9bddb1bce66c9ea933600cb297c8b1850ad2a3c" dependencies = [ "alloy-primitives", "alloy-sol-macro", @@ -361,23 +366,16 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.37" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64c09ec565a90ed8390d82aa08cd3b22e492321b96cb4a3d4f58414683c9e2f" +checksum = "7ccf423f6de62e8ce1d6c7a11fb7508ae3536d02e0d68aaeb05c8669337d0937" dependencies = [ - "alloy-primitives", - "darling 0.21.3", + "darling", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -423,7 +421,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.4", + "hashbrown 0.15.5", "itertools 0.13.0", "num-bigint", "num-integer", @@ -516,7 +514,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -554,7 +552,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -569,7 +567,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -614,7 +612,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -680,7 +678,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -740,11 +738,11 @@ dependencies = [ [[package]] name = "bitflags" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -771,9 +769,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" +checksum = "dcdb4c7013139a150f9fc55d123186dbfaba0d912817466282c73ac49e71fb45" dependencies = [ "cc", "glob", @@ -781,6 +779,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "bumpalo" version = "3.19.0" @@ 
-795,9 +816,9 @@ checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" [[package]] name = "bytemuck" -version = "1.23.1" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" [[package]] name = "byteorder" @@ -816,9 +837,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "2.1.1" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7318cfa722931cb5fe0838b98d3ce5621e75f6a6408abc21721d80de9223f2e4" +checksum = "e00bf4b112b07b505472dbefd19e37e53307e2bfed5a79e0cc161d58ccd0e687" dependencies = [ "blst", "cc", @@ -831,10 +852,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.15" +version = "1.2.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" +checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -842,17 +864,22 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "num-traits", "serde", @@ -861,15 +888,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.14.1" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e22e0ed40b96a48d3db274f72fd365bd78f67af39b6bbd47e8a15e1c6207ff" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" dependencies = [ "cfg-if", "cpufeatures", - "hex", "proptest", - "serde", + "serde_core", ] [[package]] @@ -880,9 +906,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" dependencies = [ "const_format_proc_macros", ] @@ -971,38 +997,14 @@ dependencies = [ "typenum", ] -[[package]] -name = "darling" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" -dependencies = [ - "darling_core 0.20.11", - "darling_macro 0.20.11", -] - [[package]] name = "darling" version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "darling_core 0.21.3", - "darling_macro 0.21.3", -] - -[[package]] -name 
= "darling_core" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 2.0.104", + "darling_core", + "darling_macro", ] [[package]] @@ -1017,18 +1019,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.104", -] - -[[package]] -name = "darling_macro" -version = "0.20.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" -dependencies = [ - "darling_core 0.20.11", - "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1037,9 +1028,9 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core 0.21.3", + "darling_core", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1054,12 +1045,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", - "serde", + "serde_core", ] [[package]] @@ -1075,13 +1066,13 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" +checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1102,7 +1093,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", "unicode-xid", ] @@ -1135,7 +1126,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1146,9 +1137,9 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "dyn-clone" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "ecdsa" @@ -1174,7 +1165,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1208,22 +1199,22 @@ dependencies = [ [[package]] name = "enum-ordinalize" -version = "4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" dependencies = [ "enum-ordinalize-derive", ] [[package]] name = "enum-ordinalize-derive" -version = "4.3.1" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1234,7 +1225,7 @@ checksum = 
"2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1245,12 +1236,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys", ] [[package]] @@ -1291,6 +1282,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + [[package]] name = "fixed-hash" version = "0.8.0" @@ -1311,15 +1308,15 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.5" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -1356,9 +1353,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.7" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" dependencies = [ "typenum", "version_check", @@ -1373,26 +1370,26 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi", ] [[package]] name = "getrandom" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasip2", ] [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "group" @@ -1413,11 +1410,19 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +dependencies = [ "foldhash", "serde", ] @@ -1439,9 +1444,6 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -dependencies = [ - "serde", -] [[package]] name = "hex-conservative" @@ -1463,9 +1465,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1487,9 +1489,9 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", "potential_utf", @@ -1500,9 +1502,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", "litemap", @@ -1513,11 +1515,10 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", @@ -1528,42 +1529,38 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" dependencies = [ - "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", - "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" [[package]] name = "icu_provider" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", - "stable_deref_trait", - "tinystr", "writeable", "yoke", "zerofrom", @@ -1579,9 +1576,9 @@ checksum = 
"b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -1615,7 +1612,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1631,13 +1628,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "equivalent", - "hashbrown 0.15.4", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -1675,19 +1673,19 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" dependencies = [ "once_cell", "wasm-bindgen", @@ -1726,17 +1724,11 @@ dependencies = [ "sha3-asm", ] -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - [[package]] name = "libc" -version = "0.2.174" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libm" @@ -1746,21 +1738,21 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "macro-string" @@ -1770,14 +1762,14 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "memchr" -version = "2.7.5" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "modular-bitfield" @@ -1893,9 +1885,9 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" dependencies = [ "num_enum_derive", "rustversion", @@ -1903,13 +1895,13 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -1926,9 +1918,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0418987d1aaed324d95b4beffc93635e19be965ed5d63ec07a35980fe3b71a4" +checksum = "2c4b5ecbd0beec843101bffe848217f770e8b8da81d8355b7d6e226f2199b3dc" dependencies = [ "alloy-rlp", "cfg-if", @@ -2008,7 +2000,7 @@ source = "git+https://github.com/openvm-org/openvm.git?tag=v1.4.0#39ee587f0f7364 dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -2098,7 +2090,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -2109,26 +2101,25 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.1" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" +checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" dependencies = [ "memchr", - "thiserror", "ucd-trie", ] [[package]] name = "phf" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" dependencies = [ "phf_macros", "phf_shared", @@ -2137,32 +2128,32 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" dependencies = [ + "fastrand", "phf_shared", - "rand 0.8.5", ] [[package]] name = "phf_macros" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" dependencies = [ "phf_generator", "phf_shared", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" dependencies = [ "siphasher", ] @@ -2203,9 +2194,9 @@ checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ "zerovec", ] @@ -2247,9 +2238,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ "toml_edit", ] @@ -2273,30 +2264,29 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", "bitflags", - "lazy_static", "num-traits", - "rand 0.9.1", + "rand 0.9.2", "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax", @@ -2313,9 +2303,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.40" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] @@ -2346,9 +2336,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -2390,7 +2380,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "serde", ] @@ -2405,29 +2395,29 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reth-chainspec" @@ -2471,7 +2461,7 @@ version = "1.8.2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -2831,8 +2821,8 @@ dependencies = [ [[package]] name = "revm" -version = "29.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "30.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "revm-bytecode", "revm-context", @@ -2849,8 +2839,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "6.2.2" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "7.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitvec", "phf", @@ -2860,8 +2850,8 @@ dependencies = [ [[package]] name = "revm-context" -version = "9.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "10.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitvec", "cfg-if", @@ -2875,8 +2865,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "10.2.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -2889,8 +2879,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "9.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "revm-bytecode", "revm-database-interface", @@ -2900,8 +2890,8 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "8.0.2" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "either", @@ -2911,8 +2901,8 @@ dependencies = [ [[package]] name = "revm-handler" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "11.1.1" +source = 
"git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "derive-where", @@ -2928,8 +2918,8 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "10.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "11.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "auto_impl", "either", @@ -2943,18 +2933,19 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "25.0.3" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "27.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "revm-bytecode", "revm-context-interface", "revm-primitives", + "revm-state", ] [[package]] name = "revm-precompile" -version = "27.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "28.1.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -2973,8 +2964,8 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "20.2.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "21.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "alloy-primitives", "num_enum", @@ -2985,7 +2976,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm#f5ae93d909f185461af8d0263b2549081e9781c8" +source = "git+https://github.com/scroll-tech/scroll-revm?tag=scroll-v91#a1ac004adf0019d9926defc4e31e6a76a7e558f7" dependencies = [ "auto_impl", "enumn", @@ -2997,8 +2988,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "7.0.5" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv87#9fe419ed75b11d310e22a5bc7cc7df2a0d7eac25" +version = "8.0.1" +source = "git+https://github.com/scroll-tech/revm?tag=scroll-v91#10e11b985ed28bd383e624539868bcc3f613d77c" dependencies = [ "bitflags", "revm-bytecode", @@ -3037,13 +3028,14 @@ dependencies = [ [[package]] name = "ruint" -version = "1.15.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" +checksum = "a68df0380e5c9d20ce49534f292a36a7514ae21350726efe1865bdb1fa91d278" dependencies = [ "alloy-rlp", "ark-ff 0.3.0", "ark-ff 0.4.2", + "ark-ff 0.5.0", "bytes", "fastrlp 0.3.1", "fastrlp 0.4.0", @@ -3054,10 +3046,10 @@ dependencies = [ "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rlp", "ruint-macro", - "serde", + "serde_core", "valuable", "zeroize", ] @@ -3095,33 +3087,33 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.26", + "semver 1.0.27", ] [[package]] name = "rustix" -version = "1.0.7" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = 
"cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys", ] [[package]] name = "rustversion" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ "fnv", "quick-error", @@ -3149,9 +3141,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" dependencies = [ "dyn-clone", "ref-cast", @@ -3260,9 +3252,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "semver-parser" @@ -3300,36 +3292,36 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] name = "serde_with" -version = "3.14.0" +version = "3.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04" dependencies = [ "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.10.0", + "indexmap 2.12.0", "schemars 0.9.0", - "schemars 1.0.4", - "serde", - "serde_derive", + "schemars 1.1.0", + "serde_core", "serde_json", "serde_with_macros", "time", @@ -3337,14 +3329,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.14.0" +version = "3.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955" dependencies = [ - "darling 0.20.11", + "darling", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -3431,9 +3423,9 @@ dependencies = [ [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "static_assertions" @@ -3449,11 +3441,11 @@ checksum = 
"7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ - "strum_macros 0.27.1", + "strum_macros 0.27.2", ] [[package]] @@ -3466,20 +3458,19 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "strum_macros" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck", "proc-macro2", "quote", - "rustversion", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -3501,9 +3492,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.104" +version = "2.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" dependencies = [ "proc-macro2", "quote", @@ -3512,14 +3503,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a985ff4ffd7373e10e0fb048110fb11a162e5a4c47f92ddb8787a6f766b769" +checksum = "ff790eb176cc81bb8936aed0f7b9f14fc4670069a2d371b3e3b0ecce908b2cb3" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -3530,7 +3521,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -3541,35 +3532,35 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.20.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys", ] [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -3583,9 +3574,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ 
"deranged", "itoa", @@ -3598,15 +3589,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -3623,9 +3614,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", "zerovec", @@ -3633,18 +3624,31 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.11" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] [[package]] name = "toml_edit" -version = "0.22.27" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.12.0", "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ "winnow", ] @@ -3667,7 +3671,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -3681,9 +3685,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "ucd-trie" @@ -3711,9 +3715,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-segmentation" @@ -3729,9 +3733,9 @@ checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", @@ -3772,45 +3776,32 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" +name = "wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.104", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3818,31 +3809,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.104", - "wasm-bindgen-backend", + "syn 2.0.110", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" dependencies = [ "unicode-ident", ] [[package]] name = "windows-core" -version = "0.61.2" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", @@ -3853,219 +3844,79 @@ dependencies = [ [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + 
"syn 2.0.110", ] [[package]] name = "windows-link" -version = "0.1.3" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.2", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.2" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - [[package]] name = "winnow" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "wyz" @@ -4078,11 +3929,10 @@ dependencies = [ [[package]] name = "yoke" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -4090,34 +3940,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -4137,15 +3987,15 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "zeroize_derive", ] @@ -4158,14 +4008,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", "yoke", @@ -4174,9 +4024,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ "yoke", "zerofrom", @@ -4185,13 +4035,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.110", ] [[package]] @@ -4214,9 +4064,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" 
dependencies = [ "cc", "pkg-config", diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index e7df83724da..4cf2cf3044e 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm" } +revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" } diff --git a/crates/scroll/payload/src/builder.rs b/crates/scroll/payload/src/builder.rs index ca754cbadb3..1022ab80aad 100644 --- a/crates/scroll/payload/src/builder.rs +++ b/crates/scroll/payload/src/builder.rs @@ -30,7 +30,7 @@ use reth_scroll_evm::{ScrollBaseFeeProvider, ScrollNextBlockEnvAttributes}; use reth_scroll_primitives::{ScrollPrimitives, ScrollTransactionSigned}; use reth_storage_api::{BaseFeeProvider, StateProvider, StateProviderFactory}; use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; -use revm::context::{Block, BlockEnv}; +use revm::context::Block; use scroll_alloy_hardforks::ScrollHardforks; use std::{boxed::Box, sync::Arc, vec, vec::Vec}; @@ -376,9 +376,9 @@ where } /// Returns the current fee settings for transactions from the mempool - pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { + pub fn best_transaction_attributes(&self, block_env: impl Block) -> BestTransactionsAttributes { BestTransactionsAttributes::new( - block_env.basefee, + block_env.basefee(), block_env.blob_gasprice().map(|p| p as u64), ) } @@ -435,7 +435,7 @@ where builder: &mut impl BlockBuilder, ) -> Result { let mut info = ExecutionInfo::new(); - let block_gas_limit = builder.evm().block().gas_limit; + let block_gas_limit = builder.evm().block().gas_limit(); let mut gas_spent_by_transactions = Vec::new(); for sequencer_tx in &self.attributes().transactions { @@ -506,8 +506,8 @@ where builder_config: &ScrollBuilderConfig, breaker: PayloadBuildingBreaker, ) -> Result, PayloadBuilderError> { - let block_gas_limit = builder.evm_mut().block().gas_limit; - let base_fee = builder.evm_mut().block().basefee; + let block_gas_limit = builder.evm_mut().block().gas_limit(); + let base_fee = builder.evm_mut().block().basefee(); while let Some(tx) = best_txs.next(()) { let tx = tx.into_consensus(); diff --git a/crates/scroll/rpc/src/eth/call.rs b/crates/scroll/rpc/src/eth/call.rs index bcc263fbcc7..3ea1fa40f02 100644 --- a/crates/scroll/rpc/src/eth/call.rs +++ b/crates/scroll/rpc/src/eth/call.rs @@ -1,6 +1,5 @@ use crate::{ScrollEthApi, ScrollEthApiError}; -use reth_evm::{SpecFor, TxEnvFor}; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall}, RpcConvert, RpcNodeCore, @@ -11,12 +10,7 @@ impl EthCall for ScrollEthApi where N: RpcNodeCore, ScrollEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = ScrollEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -24,12 +18,7 @@ impl EstimateCall for ScrollEthApi where N: RpcNodeCore, ScrollEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = ScrollEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { } @@ -37,12 +26,7 @@ impl Call for ScrollEthApi where N: RpcNodeCore, ScrollEthApiError: FromEvmError, - Rpc: RpcConvert< - Primitives = N::Primitives, - Error = 
ScrollEthApiError, - TxEnv = TxEnvFor, - Spec = SpecFor, - >, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 0a9aaef73de..2446219ea3d 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -9,7 +9,7 @@ use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::ProviderNodeTypes, BlockHashReader, BlockNumReader, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, DatabaseProviderFactory, ProviderFactory, - PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, + PruneCheckpointReader, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -31,7 +31,7 @@ use crate::{ }; pub use builder::*; use progress::*; -use reth_errors::RethResult; +use reth_errors::{ProviderResult, RethResult}; pub use set::*; /// A container for a queued stage. @@ -101,12 +101,6 @@ impl Pipeline { PipelineBuilder::default() } - /// Return the minimum block number achieved by - /// any stage during the execution of the pipeline. - pub const fn minimum_block_number(&self) -> Option { - self.progress.minimum_block_number - } - /// Set tip for reverse sync. #[track_caller] pub fn set_tip(&self, tip: B256) { @@ -127,9 +121,7 @@ impl Pipeline { ) -> &mut dyn Stage< as DatabaseProviderFactory>::ProviderRW> { &mut self.stages[idx] } -} -impl Pipeline { /// Registers progress metrics for each registered stage pub fn register_metrics(&mut self) -> Result<(), PipelineError> { let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) }; @@ -290,6 +282,81 @@ impl Pipeline { Ok(()) } + /// Check if the pipeline is consistent (all stages have checkpoint block numbers no less + /// than the checkpoint of the first stage). + /// + /// This will return the pipeline target if: + /// * the pipeline was interrupted during its previous run + /// * a new stage was added + /// * stage data was dropped manually through `reth stage drop ...` + /// + /// # Returns + /// + /// A target block hash if the pipeline is inconsistent, otherwise `None`. + pub fn initial_backfill_target(&self) -> ProviderResult> { + let provider = self.provider_factory.provider()?; + + // If no target was provided, check if the stages are congruent, i.e. whether the + // checkpoint of each stage matches the checkpoint of the first. + let first_stage_checkpoint = provider + .get_stage_checkpoint(self.stages.first().unwrap().id())? + .unwrap_or_default() + .block_number; + + // Skip the first stage, as we've already retrieved its checkpoint and are comparing + // all other checkpoints against it. + for stage in self.stages.iter().skip(1) { + let stage_id = stage.id(); + + let stage_checkpoint = + provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number; + + // If the checkpoint of any stage is less than the checkpoint of the first stage, + // retrieve and return the block hash at the first stage's checkpoint and use it + // as the target. + if stage_checkpoint < first_stage_checkpoint { + debug!( + target: "consensus::engine", + first_stage_checkpoint, + inconsistent_stage_id = %stage_id, + inconsistent_stage_checkpoint = stage_checkpoint, + "Pipeline sync progress is inconsistent" + ); + return provider.block_hash(first_stage_checkpoint); + } + } + + Ok(None) + } + + /// Checks for consistency between database and static files. 
If it fails, it unwinds to + /// the first block that's consistent between database and static files. + pub async fn ensure_static_files_consistency(&mut self) -> Result<(), PipelineError> { + let maybe_unwind_target = self + .provider_factory + .static_file_provider() + .check_consistency(&self.provider_factory.provider()?)?; + + self.move_to_static_files()?; + + if let Some(unwind_target) = maybe_unwind_target { + // Highly unlikely to happen, and given its destructive nature, it's better to panic + // instead. + assert_ne!( + unwind_target, + 0, + "A static file <> database inconsistency was found that would trigger an unwind to block 0" + ); + + info!(target: "reth::cli", unwind_target = %unwind_target, "Executing an unwind after a failed storage consistency check."); + + self.unwind(unwind_target, None).inspect_err(|err| { + error!(target: "reth::cli", unwind_target = %unwind_target, %err, "failed to run unwind") + })?; + } + + Ok(()) + } + /// Unwind the stages to the target block (exclusive). /// /// If the unwind is due to a bad block the number of that block should be specified. diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index 436ee769659..971bc11f897 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -211,10 +211,16 @@ where height } else { - input.target() + // It's possible for a pipeline sync to be executed with a None target, e.g. after a + // stage was manually dropped, and `reth node` is then called without a `--debug.tip`. + // + // In this case we don't want to simply default to zero, as that would overwrite the + // previously stored checkpoint block number. Instead we default to that previous + // checkpoint. + input.target.unwrap_or_else(|| input.checkpoint().block_number) }; - Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height == input.target() }) + Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height >= input.target() }) } fn unwind( diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index f9b2312f5ab..7e57009e808 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -72,9 +72,7 @@ mod tests { StaticFileProviderFactory, StorageReader, }; use reth_prune_types::{PruneMode, PruneModes}; - use reth_stages_api::{ - ExecInput, ExecutionStageThresholds, PipelineTarget, Stage, StageCheckpoint, StageId, - }; + use reth_stages_api::{ExecInput, ExecutionStageThresholds, Stage, StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_receipt, BlockRangeParams, @@ -301,7 +299,7 @@ mod tests { prune_count: usize, segment: StaticFileSegment, is_full_node: bool, - expected: Option, + expected: Option, ) { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. @@ -323,11 +321,18 @@ mod tests { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. 
+ let mut provider = db.factory.database_provider_ro().unwrap(); + if is_full_node { + provider.set_prune_modes(PruneModes { + receipts: Some(PruneMode::Full), + ..Default::default() + }); + } let mut static_file_provider = db.factory.static_file_provider(); static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert!(matches!( static_file_provider - .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), + .check_consistency(&provider), Ok(e) if e == expected )); } @@ -338,7 +343,7 @@ mod tests { db: &TestStageDB, stage_id: StageId, checkpoint_block_number: BlockNumber, - expected: Option, + expected: Option, ) { let provider_rw = db.factory.provider_rw().unwrap(); provider_rw @@ -349,18 +354,15 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false,), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); } /// Inserts a dummy value at key and compare the check consistency result against the expected /// one. - fn update_db_and_check>( - db: &TestStageDB, - key: u64, - expected: Option, - ) where + fn update_db_and_check>(db: &TestStageDB, key: u64, expected: Option) + where ::Value: Default, { update_db_with_and_check::(db, key, expected, &Default::default()); @@ -371,7 +373,7 @@ mod tests { fn update_db_with_and_check>( db: &TestStageDB, key: u64, - expected: Option, + expected: Option, value: &T::Value, ) { let provider_rw = db.factory.provider_rw().unwrap(); @@ -382,7 +384,7 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); } @@ -393,7 +395,7 @@ mod tests { let db_provider = db.factory.database_provider_ro().unwrap(); assert!(matches!( - db.factory.static_file_provider().check_consistency(&db_provider, false), + db.factory.static_file_provider().check_consistency(&db_provider), Ok(None) )); } @@ -415,7 +417,7 @@ mod tests { 1, StaticFileSegment::Receipts, archive_node, - Some(PipelineTarget::Unwind(88)), + Some(88), ); simulate_behind_checkpoint_corruption( @@ -423,7 +425,7 @@ mod tests { 3, StaticFileSegment::Headers, archive_node, - Some(PipelineTarget::Unwind(86)), + Some(86), ); } @@ -472,7 +474,7 @@ mod tests { ); // When a checkpoint is ahead, we request a pipeline unwind. - save_checkpoint_and_check(&db, StageId::Headers, 91, Some(PipelineTarget::Unwind(block))); + save_checkpoint_and_check(&db, StageId::Headers, 91, Some(block)); } #[test] @@ -485,7 +487,7 @@ mod tests { .unwrap(); // Creates a gap of one header: static_file db - update_db_and_check::(&db, current + 2, Some(PipelineTarget::Unwind(89))); + update_db_and_check::(&db, current + 2, Some(89)); // Fill the gap, and ensure no unwind is necessary. update_db_and_check::(&db, current + 1, None); @@ -504,7 +506,7 @@ mod tests { update_db_with_and_check::( &db, current + 2, - Some(PipelineTarget::Unwind(89)), + Some(89), &TxLegacy::default().into_signed(Signature::test_signature()).into(), ); @@ -527,7 +529,7 @@ mod tests { .unwrap(); // Creates a gap of one receipt: static_file db - update_db_and_check::(&db, current + 2, Some(PipelineTarget::Unwind(89))); + update_db_and_check::(&db, current + 2, Some(89)); // Fill the gap, and ensure no unwind is necessary. 
update_db_and_check::(&db, current + 1, None); diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index 23308bcfa55..38b96d6bd0f 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -21,6 +21,9 @@ use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_trie_common::{HashedPostState, KeccakKeyHasher}; +/// BLOCKHASH ancestor lookup window limit per EVM (number of most recent blocks accessible). +const BLOCKHASH_ANCESTOR_LIMIT: usize = 256; + /// Errors that can occur during stateless validation. #[derive(Debug, thiserror::Error)] pub enum StatelessValidationError { @@ -175,6 +178,15 @@ where // ascending order. ancestor_headers.sort_by_key(|header| header.number()); + // Enforce BLOCKHASH ancestor headers limit (256 most recent blocks) + let count = ancestor_headers.len(); + if count > BLOCKHASH_ANCESTOR_LIMIT { + return Err(StatelessValidationError::AncestorHeaderLimitExceeded { + count, + limit: BLOCKHASH_ANCESTOR_LIMIT, + }); + } + // Check that the ancestor headers form a contiguous chain and are not just random headers. let ancestor_hashes = compute_ancestor_hashes(¤t_block, &ancestor_headers)?; diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 67e5f32b07c..1ac37966c2e 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -312,10 +312,9 @@ where return (None, buf) } - let (len, mut buf) = decode_varuint(buf); + let (len, buf) = decode_varuint(buf); - let (element, _) = T::from_compact(&buf[..len], len); - buf.advance(len); + let (element, buf) = T::from_compact(buf, len); (Some(element), buf) } diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 900dd525f4a..f8536bbb1ce 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -31,7 +31,7 @@ alloy-consensus.workspace = true reth-scroll-primitives = { workspace = true, optional = true } # optimism -reth-optimism-primitives = { workspace = true, optional = true } +reth-optimism-primitives = { workspace = true, optional = true, features = ["serde", "reth-codec"] } # codecs modular-bitfield.workspace = true diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index a5cb5ff477d..259b2d39b15 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -531,7 +531,7 @@ pub enum ChainStateKey { /// Last finalized block key LastFinalizedBlock, /// Last safe block key - LastSafeBlockBlock, + LastSafeBlock, } impl Encode for ChainStateKey { @@ -540,7 +540,7 @@ impl Encode for ChainStateKey { fn encode(self) -> Self::Encoded { match self { Self::LastFinalizedBlock => [0], - Self::LastSafeBlockBlock => [1], + Self::LastSafeBlock => [1], } } } @@ -549,7 +549,7 @@ impl Decode for ChainStateKey { fn decode(value: &[u8]) -> Result { match value { [0] => Ok(Self::LastFinalizedBlock), - [1] => Ok(Self::LastSafeBlockBlock), + [1] => Ok(Self::LastSafeBlock), _ => Err(crate::DatabaseError::Decode), } } diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 82a3726c43e..e8599a89706 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -29,7 +29,6 @@ reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true 
-reth-evm.workspace = true reth-chain-state.workspace = true reth-node-types.workspace = true reth-static-file-types.workspace = true @@ -90,7 +89,6 @@ test-utils = [ "reth-ethereum-engine-primitives", "reth-ethereum-primitives/test-utils", "reth-chainspec/test-utils", - "reth-evm/test-utils", "reth-primitives-traits/test-utils", "reth-codecs/test-utils", "reth-db-api/test-utils", diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 75e276b3c42..7040032eca0 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,48 +1,34 @@ -#![allow(unused)] use crate::{ providers::{ConsistentProvider, ProviderNodeTypes, StaticFileProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, - ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, - DatabaseProviderFactory, FullProvider, HashedPostStateProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, + HashedPostStateProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, + ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, + StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, }; -use alloy_consensus::{transaction::TransactionMeta, Header}; -use alloy_eips::{ - eip4895::{Withdrawal, Withdrawals}, - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, -}; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_consensus::transaction::TransactionMeta; +use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ BlockState, CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, MemoryOverlayStateProvider, }; -use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db_api::{ - models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}, - transaction::DbTx, - Database, -}; -use reth_ethereum_primitives::{Block, EthPrimitives, Receipt, TransactionSigned}; -use reth_evm::{ConfigureEvm, EvmEnv}; +use reth_chainspec::ChainInfo; +use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}; use reth_execution_types::ExecutionOutcome; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; -use reth_primitives_traits::{ - Account, BlockBody, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, StorageEntry, -}; +use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - BlockBodyIndicesProvider, DBProvider, NodePrimitivesProvider, StorageChangeSetReader, -}; +use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader}; use 
reth_storage_errors::provider::ProviderResult; use reth_trie::{HashedPostState, KeccakKeyHasher}; use revm_database::BundleState; use std::{ - ops::{Add, RangeBounds, RangeInclusive, Sub}, + ops::{RangeBounds, RangeInclusive}, sync::Arc, time::Instant, }; @@ -716,6 +702,14 @@ impl ChangeSetReader for BlockchainProvider { ) -> ProviderResult> { self.consistent_provider()?.account_block_changeset(block_number) } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + self.consistent_provider()?.get_account_before_block(block_number, address) + } } impl AccountReader for BlockchainProvider { @@ -753,8 +747,7 @@ mod tests { create_test_provider_factory, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, - StaticFileWriter, + BlockWriter, CanonChainTracker, ProviderFactory, }; use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; @@ -765,22 +758,12 @@ mod tests { CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, NewCanonicalChain, }; - use reth_chainspec::{ - ChainSpec, ChainSpecBuilder, ChainSpecProvider, EthereumHardfork, MAINNET, - }; - use reth_db_api::{ - cursor::DbCursorRO, - models::{AccountBeforeTx, StoredBlockBodyIndices}, - tables, - transaction::DbTx, - }; + use reth_chainspec::{ChainSpec, MAINNET}; + use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; - use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; + use reth_ethereum_primitives::{Block, Receipt}; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives_traits::{ - BlockBody, RecoveredBlock, SealedBlock, SignedTransaction, SignerRecoverable, - }; - use reth_static_file_types::StaticFileSegment; + use reth_primitives_traits::{RecoveredBlock, SealedBlock, SignerRecoverable}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DBProvider, DatabaseProviderFactory, @@ -793,9 +776,8 @@ mod tests { }; use revm_database::{BundleState, OriginalValuesKnown}; use std::{ - ops::{Bound, Deref, Range, RangeBounds}, + ops::{Bound, Range, RangeBounds}, sync::Arc, - time::Instant, }; const TEST_BLOCKS_COUNT: usize = 5; @@ -2586,14 +2568,15 @@ mod tests { persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[1].number); let to_be_persisted_tx = in_memory_blocks[1].body().transactions[0].clone(); - assert!(matches!( + assert_eq!( correct_transaction_hash_fn( *to_be_persisted_tx.tx_hash(), provider.canonical_in_memory_state(), provider.database - ), - Ok(Some(to_be_persisted_tx)) - )); + ) + .unwrap(), + Some(to_be_persisted_tx) + ); } Ok(()) diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 03615d5357b..93415e8e347 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1422,6 +1422,52 @@ impl ChangeSetReader for ConsistentProvider { self.storage_provider.account_block_changeset(block_number) } } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + // Search in-memory state 
for the account changeset + let changeset = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .to_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .find(|(addr, _)| addr == &address) + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }); + Ok(changeset) + } else { + // Perform checks on whether or not changesets exist for the block. + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + // Delegate to the storage provider for database lookups + self.storage_provider.get_account_before_block(block_number, address) + } + } } impl AccountReader for ConsistentProvider { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 54642a94757..f7b3c4ba603 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -111,6 +111,11 @@ impl ProviderFactory { pub fn into_db(self) -> N::DB { self.db } + + /// Returns reference to the prune modes. + pub const fn prune_modes_ref(&self) -> &PruneModes { + &self.prune_modes + } } impl>> ProviderFactory { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8fbd71e2ace..23278592c31 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -921,6 +921,19 @@ impl ChangeSetReader for DatabaseProvider { }) .collect() } + + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult> { + self.tx + .cursor_dup_read::()? + .seek_by_key_subkey(block_number, address)? + .filter(|acc| acc.address == address) + .map(Ok) + .transpose() + } } impl HeaderSyncGapProvider @@ -1950,7 +1963,6 @@ impl StateWriter for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) @@ -2049,7 +2061,6 @@ impl StateWriter for (storage_key, (old_storage_value, _new_storage_value)) in storage { let storage_entry = StorageEntry { key: *storage_key, value: *old_storage_value }; // delete previous value - // TODO: This does not use dupsort features if plain_storage_cursor .seek_by_key_subkey(*address, *storage_key)? .filter(|s| s.key == *storage_key) @@ -2837,7 +2848,7 @@ impl ChainStateBlockReader for DatabaseProvide let mut finalized_blocks = self .tx .cursor_read::()? - .walk(Some(tables::ChainStateKey::LastSafeBlockBlock))? + .walk(Some(tables::ChainStateKey::LastSafeBlock))? 
.take(1) .collect::, _>>()?; @@ -2854,9 +2865,7 @@ impl ChainStateBlockWriter for DatabaseProvider ProviderResult<()> { - Ok(self - .tx - .put::(tables::ChainStateKey::LastSafeBlockBlock, block_number)?) + Ok(self.tx.put::(tables::ChainStateKey::LastSafeBlock, block_number)?) } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 9a22a527ccb..f3e69bf7d91 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, + ChangeSetReader, HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; @@ -241,23 +241,23 @@ impl HistoricalStateProviderRef<'_, Provi } } -impl AccountReader +impl AccountReader for HistoricalStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult> { match self.account_history_lookup(*address)? { HistoryInfo::NotYetWritten => Ok(None), - HistoryInfo::InChangeset(changeset_block_number) => Ok(self - .tx() - .cursor_dup_read::()? - .seek_by_key_subkey(changeset_block_number, *address)? - .filter(|acc| &acc.address == address) - .ok_or(ProviderError::AccountChangesetNotFound { - block_number: changeset_block_number, - address: *address, - })? - .info), + HistoryInfo::InChangeset(changeset_block_number) => { + // Use ChangeSetReader trait method to get the account from changesets + self.provider + .get_account_before_block(changeset_block_number, *address)? + .ok_or(ProviderError::AccountChangesetNotFound { + block_number: changeset_block_number, + address: *address, + }) + .map(|account_before| account_before.info) + } HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { Ok(self.tx().get_by_encoded_key::(address)?) } @@ -394,7 +394,7 @@ impl HashedPostStateProvider for HistoricalStateProviderRef<'_, } } -impl StateProvider +impl StateProvider for HistoricalStateProviderRef<'_, Provider> { /// Get storage. @@ -485,7 +485,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader ]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. 
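The shape of the new lookup is easiest to see in isolation. Below is a minimal sketch, not part of this diff, of how a caller resolves an account's pre-block state through the new `ChangeSetReader::get_account_before_block` method; the helper name and the exact import paths are assumptions based on the crates touched above:

    use alloy_primitives::{Address, BlockNumber};
    use reth_primitives_traits::Account;
    use reth_storage_api::ChangeSetReader;
    use reth_storage_errors::provider::ProviderResult;

    /// Hypothetical helper: returns the state of `address` as it was *before*
    /// `block_number` executed, or `None` if that block did not touch the account.
    fn account_before<P: ChangeSetReader>(
        provider: &P,
        block_number: BlockNumber,
        address: Address,
    ) -> ProviderResult<Option<Account>> {
        // `AccountBeforeTx::info` is `None` when the account did not exist prior to
        // this block, so the two layers of `Option` collapse into one here.
        Ok(provider.get_account_before_block(block_number, address)?.and_then(|before| before.info))
    }

This is the same path `HistoricalStateProviderRef::basic_account` now takes for `HistoryInfo::InChangeset`, which is why the historical provider's bounds gained `ChangeSetReader`.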
@@ -530,7 +530,9 @@ mod tests { BlockNumberList, }; use reth_primitives_traits::{Account, StorageEntry}; - use reth_storage_api::{BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, ChangeSetReader, DBProvider, DatabaseProviderFactory, + }; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0x0000000000000000000000000000000000000001"); @@ -540,7 +542,9 @@ const fn assert_state_provider() {} #[expect(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader, + >() { assert_state_provider::>(); } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e93b4fe10df..9f91cb9d5f8 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -37,7 +37,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; -use reth_stages_types::{PipelineTarget, StageId}; +use reth_stages_types::StageId; use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, @@ -731,15 +731,14 @@ impl StaticFileProvider { /// * its highest block should match the stage checkpoint block number if it's equal or higher /// than the corresponding database table last entry. /// - /// Returns a [`Option`] of [`PipelineTarget::Unwind`] if any healing is further required. + /// Returns an [`Option`] with the block number to unwind to if any further healing is required. /// /// WARNING: No static file writer should be held before calling this function, otherwise it /// will deadlock. pub fn check_consistency( &self, provider: &Provider, - has_receipt_pruning: bool, - ) -> ProviderResult> + ) -> ProviderResult> where Provider: DBProvider + BlockReader + StageCheckpointReader + ChainSpecProvider, N: NodePrimitives, @@ -776,7 +775,7 @@ impl StaticFileProvider { }; for segment in StaticFileSegment::iter() { - if has_receipt_pruning && segment.is_receipts() { + if provider.prune_modes_ref().has_receipts_pruning() && segment.is_receipts() { // Pruned nodes (including full node) do not store receipts as static files. continue } @@ -887,7 +886,7 @@ impl StaticFileProvider { } } - Ok(unwind_target.map(PipelineTarget::Unwind)) + Ok(unwind_target) } /// Checks consistency of the latest static file segment and throws an error if at fault. 
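Since `check_consistency` now reads prune information off the provider and returns a bare block number, the call site shrinks accordingly. A minimal caller sketch, in which `provider_factory` and `pipeline` are assumed bindings rather than part of this diff:

    let provider = provider_factory.provider()?;
    // The provider carries its own prune modes, so callers no longer thread a
    // `has_receipt_pruning` flag through.
    if let Some(unwind_target) = provider_factory.static_file_provider().check_consistency(&provider)? {
        // `unwind_target` is a plain block number; wrapping it in
        // `PipelineTarget::Unwind(..)` is no longer part of the API.
        pipeline.unwind(unwind_target, None)?;
    }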
diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index d5e3fe4da7b..1024312ead9 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -984,6 +984,14 @@ impl ChangeSetReader for MockEthProvi ) -> ProviderResult> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Ok(None) + } } impl StateReader for MockEthProvider { diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index 76e511d52d4..ed6e49eefbd 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -1764,6 +1764,14 @@ where ) -> Result, ProviderError> { Err(ProviderError::UnsupportedProvider) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Err(ProviderError::UnsupportedProvider) + } } impl StateProviderFactory for RpcBlockchainStateProvider diff --git a/crates/storage/storage-api/src/account.rs b/crates/storage/storage-api/src/account.rs index 1692c4c21f4..270bfd1226c 100644 --- a/crates/storage/storage-api/src/account.rs +++ b/crates/storage/storage-api/src/account.rs @@ -54,4 +54,13 @@ pub trait ChangeSetReader { &self, block_number: BlockNumber, ) -> ProviderResult>; + + /// Search the block's changesets for the given address, and return the result. + /// + /// Returns `None` if the account was not changed in this block. + fn get_account_before_block( + &self, + block_number: BlockNumber, + address: Address, + ) -> ProviderResult>; } diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 44e499ae006..e0c57d5226b 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -399,6 +399,14 @@ impl ChangeSetReader for NoopProvider { ) -> ProviderResult> { Ok(Vec::default()) } + + fn get_account_before_block( + &self, + _block_number: BlockNumber, + _address: Address, + ) -> ProviderResult> { + Ok(None) + } } impl StateRootProvider for NoopProvider { diff --git a/crates/tracing-otlp/Cargo.toml b/crates/tracing-otlp/Cargo.toml index 7b8b666116c..60cee0aa229 100644 --- a/crates/tracing-otlp/Cargo.toml +++ b/crates/tracing-otlp/Cargo.toml @@ -9,13 +9,29 @@ repository.workspace = true exclude.workspace = true [dependencies] -opentelemetry_sdk = "0.29.0" -opentelemetry = "0.29.1" -opentelemetry-otlp = "0.29.0" -tracing-opentelemetry = "0.30.0" +# obs +opentelemetry_sdk = { workspace = true, optional = true } +opentelemetry = { workspace = true, optional = true } +opentelemetry-otlp = { workspace = true, optional = true } +opentelemetry-semantic-conventions = { workspace = true, optional = true } +tracing-opentelemetry = { workspace = true, optional = true } tracing-subscriber.workspace = true tracing.workspace = true -opentelemetry-semantic-conventions = "0.29.0" + +# misc +eyre.workspace = true +url.workspace = true [lints] workspace = true + +[features] +default = ["otlp"] + +otlp = [ + "opentelemetry", + "opentelemetry_sdk", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "tracing-opentelemetry", +] diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index 1de112cdb33..07415ac2a65 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -1,12 +1,16 @@ +#![cfg(feature = "otlp")] + //! 
Provides a tracing layer for `OpenTelemetry` that exports spans to an OTLP endpoint. //! //! This module simplifies the integration of `OpenTelemetry` tracing with OTLP export in Rust //! applications. It allows for easily capturing and exporting distributed traces to compatible //! backends like Jaeger, Zipkin, or any other OpenTelemetry-compatible tracing system. -use opentelemetry::{trace::TracerProvider, KeyValue, Value}; -use opentelemetry_otlp::SpanExporter; +use eyre::{ensure, WrapErr}; +use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; +use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ + propagation::TraceContextPropagator, trace::{SdkTracer, SdkTracerProvider}, Resource, }; @@ -14,25 +18,73 @@ use opentelemetry_semantic_conventions::{attribute::SERVICE_VERSION, SCHEMA_URL} use tracing::Subscriber; use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::registry::LookupSpan; +use url::Url; /// Creates a tracing [`OpenTelemetryLayer`] that exports spans to an OTLP endpoint. /// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing -/// with OTLP export. +/// with OTLP export to a URL. pub fn span_layer( + service_name: impl Into, + endpoint: &Url, +) -> eyre::Result> where for<'span> S: Subscriber + LookupSpan<'span>, { - let exporter = SpanExporter::builder().with_http().build().unwrap(); + global::set_text_map_propagator(TraceContextPropagator::new()); + + let resource = build_resource(service_name); + + let span_exporter = + SpanExporter::builder().with_http().with_endpoint(endpoint.to_string()).build()?; + + let tracer_provider = SdkTracerProvider::builder() + .with_resource(resource) + .with_batch_exporter(span_exporter) + .build(); + + global::set_tracer_provider(tracer_provider.clone()); + + let tracer = tracer_provider.tracer("reth-otlp"); + Ok(tracing_opentelemetry::layer().with_tracer(tracer)) +} - let resource = Resource::builder() +// Builds the OTLP resource with service information. +fn build_resource(service_name: impl Into) -> Resource { + Resource::builder() .with_service_name(service_name) .with_schema_url([KeyValue::new(SERVICE_VERSION, env!("CARGO_PKG_VERSION"))], SCHEMA_URL) - .build(); + .build() +} + +/// Destination for exported trace spans. +#[derive(Debug, Clone)] +pub enum TraceOutput { + /// Export traces as JSON to stdout. + Stdout, + /// Export traces to an OTLP collector at the specified URL. + Otlp(Url), +} + +impl TraceOutput { + /// Parses the trace output destination from a string. + /// + /// Returns `TraceOutput::Stdout` for "stdout", or `TraceOutput::Otlp` for valid OTLP URLs. + /// OTLP URLs must end with `/v1/traces` per the OTLP specification. 
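+ ///
+ /// # Example
+ ///
+ /// Illustrative only; the endpoint below is an assumed local OTLP collector, not a default
+ /// shipped by this crate.
+ ///
+ /// ```ignore
+ /// assert!(matches!(TraceOutput::parse("stdout"), Ok(TraceOutput::Stdout)));
+ /// assert!(TraceOutput::parse("http://localhost:4318/v1/traces").is_ok());
+ /// assert!(TraceOutput::parse("http://localhost:4318").is_err()); // missing the /v1/traces path
+ /// ```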
+ pub fn parse(s: &str) -> eyre::Result { + if s == "stdout" { + return Ok(Self::Stdout); + } + + let url = Url::parse(s).wrap_err("Invalid URL for trace output")?; - let provider = - SdkTracerProvider::builder().with_resource(resource).with_batch_exporter(exporter).build(); + // OTLP specification requires the `/v1/traces` path for trace endpoints + ensure!( + url.path().ends_with("/v1/traces"), + "OTLP trace endpoint must end with /v1/traces, got path: {}", + url.path() + ); - let tracer = provider.tracer("reth-otlp"); - tracing_opentelemetry::layer().with_tracer(tracer) + Ok(Self::Otlp(url)) + } } diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index a5c09c23a35..8cf83e138ca 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -12,11 +12,22 @@ description = "tracing helpers" workspace = true [dependencies] +# reth +reth-tracing-otlp = { workspace = true, optional = true } + +# obs tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "ansi", "json"] } tracing-appender.workspace = true tracing-journald.workspace = true tracing-logfmt.workspace = true -rolling-file.workspace = true -eyre.workspace = true + +# misc clap = { workspace = true, features = ["derive"] } +eyre.workspace = true +rolling-file.workspace = true +url = { workspace = true, optional = true } + +[features] +default = ["otlp"] +otlp = ["reth-tracing-otlp", "dep:url"] diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 5b9c93b5fb6..385c4fac51d 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,14 +1,14 @@ +use crate::formatter::LogFormat; +#[cfg(feature = "otlp")] +use reth_tracing_otlp::span_layer; +use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, path::{Path, PathBuf}, }; - -use rolling_file::{RollingConditionBasic, RollingFileAppender}; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; -use crate::formatter::LogFormat; - /// A worker guard returned by the file layer. /// /// When a guard is dropped, all events currently in-memory are flushed to the log file this guard @@ -123,6 +123,25 @@ impl Layers { self.add_layer(layer); Ok(guard) } + + /// Add OTLP spans layer to the layer collection + #[cfg(feature = "otlp")] + pub fn with_span_layer( + &mut self, + service_name: String, + endpoint_exporter: url::Url, + level: tracing::Level, + ) -> eyre::Result<()> { + // Create the span provider + + let span_layer = span_layer(service_name, &endpoint_exporter) + .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? + .with_filter(tracing::level_filters::LevelFilter::from_level(level)); + + self.add_layer(span_layer); + + Ok(()) + } } /// Holds configuration information for file logging. diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 5ccafe15000..b883345aac6 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -4,6 +4,8 @@ use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStor use alloy_eips::{ eip4844::{BlobAndProofV1, BlobAndProofV2}, eip7594::BlobTransactionSidecarVariant, + eip7840::BlobParams, + merge::EPOCH_SLOTS, }; use alloy_primitives::{TxHash, B256}; use parking_lot::{Mutex, RwLock}; @@ -14,6 +16,13 @@ use tracing::{debug, trace}; /// How many [`BlobTransactionSidecarVariant`] to cache in memory. 
 /// How many [`BlobTransactionSidecarVariant`] to cache in memory.
 pub const DEFAULT_MAX_CACHED_BLOBS: u32 = 100;

+/// A cache size heuristic based on the highest blob params.
+///
+/// This uses the max blobs per tx and the max blobs per block over 16 epochs (512 slots):
+/// `21 * 6 * 512 = 64512` entries, roughly 4MB at 64 bytes per entry.
+const VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE: u64 =
+    BlobParams::bpo2().max_blobs_per_tx * BlobParams::bpo2().max_blob_count * EPOCH_SLOTS * 16;
+
 /// A blob store that stores blob data on disk.
 ///
 /// The type uses deferred deletion, meaning that blobs are not immediately deleted from disk, but
@@ -288,7 +297,9 @@ impl DiskFileBlobStoreInner {
             size_tracker: Default::default(),
             file_lock: Default::default(),
             txs_to_delete: Default::default(),
-            versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new(max_length * 6))),
+            versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new(
+                VERSIONED_HASH_TO_TX_HASH_CACHE_SIZE as u32,
+            ))),
         }
     }

diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs
index 74d92fb3e6b..6360817caa1 100644
--- a/crates/transaction-pool/src/error.rs
+++ b/crates/transaction-pool/src/error.rs
@@ -157,7 +157,7 @@ pub enum Eip4844PoolTransactionError {
     /// Thrown if an EIP-4844 transaction without any blobs arrives
     #[error("blobless blob transaction")]
     NoEip4844Blobs,
-    /// Thrown if an EIP-4844 transaction without any blobs arrives
+    /// Thrown if an EIP-4844 transaction arrives with too many blobs
    #[error("too many blobs in transaction: have {have}, permitted {permitted}")]
     TooManyEip4844Blobs {
         /// Number of blobs the transaction has
diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs
index a5aa664e764..90cd042df69 100644
--- a/crates/transaction-pool/src/pool/best.rs
+++ b/crates/transaction-pool/src/pool/best.rs
@@ -16,6 +16,8 @@ use std::{
 use tokio::sync::broadcast::{error::TryRecvError, Receiver};
 use tracing::debug;

+const MAX_NEW_TRANSACTIONS_PER_BATCH: usize = 16;
+
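`add_new_transactions` below now drains at most `MAX_NEW_TRANSACTIONS_PER_BATCH` queued transactions per call, so a busy feed cannot stall the iterator. A self-contained sketch of the bounded-drain pattern (the real code polls a tokio broadcast receiver; a std channel stands in here):

```rust
use std::sync::mpsc::{channel, Receiver};

const MAX_PER_BATCH: usize = 16;

// Drain at most one batch per call; leftovers are picked up by later calls.
fn drain_batch(rx: &Receiver<u64>, out: &mut Vec<u64>) {
    for _ in 0..MAX_PER_BATCH {
        match rx.try_recv() {
            Ok(item) => out.push(item),
            Err(_) => break, // empty or disconnected
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    for i in 0..40 {
        tx.send(i).unwrap();
    }
    let mut out = Vec::new();
    drain_batch(&rx, &mut out);
    assert_eq!(out.len(), 16);
}
```

The capped drain in `add_new_transactions` is below: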
 /// An iterator that returns transactions that can be executed on the current state (*best*
 /// transactions).
 ///
@@ -165,13 +167,17 @@ impl<T: TransactionOrdering> BestTransactions<T> {
     /// Checks for new transactions that have come into the `PendingPool` after this iterator was
     /// created and inserts them
     fn add_new_transactions(&mut self) {
-        while let Some(pending_tx) = self.try_recv() {
-            // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked
-            let tx_id = *pending_tx.transaction.id();
-            if self.ancestor(&tx_id).is_none() {
-                self.independent.insert(pending_tx.clone());
+        for _ in 0..MAX_NEW_TRANSACTIONS_PER_BATCH {
+            if let Some(pending_tx) = self.try_recv() {
+                // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked
+                let tx_id = *pending_tx.transaction.id();
+                if self.ancestor(&tx_id).is_none() {
+                    self.independent.insert(pending_tx.clone());
+                }
+                self.all.insert(tx_id, pending_tx);
+            } else {
+                break;
             }
-            self.all.insert(tx_id, pending_tx);
         }
     }
 }
diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs
index 43a652a1476..193442174ca 100644
--- a/crates/transaction-pool/src/pool/parked.rs
+++ b/crates/transaction-pool/src/pool/parked.rs
@@ -260,35 +260,33 @@ impl<T: ParkedOrd> ParkedPool<BasefeeOrd<T>> {
         &self,
         basefee: u64,
     ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
-        let ids = self.satisfy_base_fee_ids(basefee as u128);
-        let mut txs = Vec::with_capacity(ids.len());
-        for id in ids {
-            txs.push(self.get(&id).expect("transaction exists").transaction.clone().into());
-        }
+        let mut txs = Vec::new();
+        self.satisfy_base_fee_ids(basefee as u128, |tx| {
+            txs.push(tx.clone());
+        });
         txs
     }

     /// Returns all transactions that satisfy the given basefee.
-    fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec<TransactionId> {
-        let mut transactions = Vec::new();
-        {
-            let mut iter = self.by_id.iter().peekable();
-
-            while let Some((id, tx)) = iter.next() {
-                if tx.transaction.transaction.max_fee_per_gas() < basefee {
-                    // still parked -> skip descendant transactions
-                    'this: while let Some((peek, _)) = iter.peek() {
-                        if peek.sender != id.sender {
-                            break 'this
-                        }
-                        iter.next();
+    fn satisfy_base_fee_ids<F>(&self, basefee: u128, mut tx_handler: F)
+    where
+        F: FnMut(&Arc<ValidPoolTransaction<T::Transaction>>),
+    {
+        let mut iter = self.by_id.iter().peekable();
+
+        while let Some((id, tx)) = iter.next() {
+            if tx.transaction.transaction.max_fee_per_gas() < basefee {
+                // still parked -> skip descendant transactions
+                'this: while let Some((peek, _)) = iter.peek() {
+                    if peek.sender != id.sender {
+                        break 'this
                     }
-                } else {
-                    transactions.push(*id);
+                    iter.next();
                 }
+            } else {
+                tx_handler(&tx.transaction);
             }
         }
-        transactions
     }

     /// Removes all transactions from this subpool that can afford the given basefee,
@@ -306,7 +304,10 @@ impl<T: ParkedOrd> ParkedPool<BasefeeOrd<T>> {
     where
         F: FnMut(Arc<ValidPoolTransaction<T::Transaction>>),
     {
-        let to_remove = self.satisfy_base_fee_ids(basefee as u128);
+        let mut to_remove = Vec::new();
+        self.satisfy_base_fee_ids(basefee as u128, |tx| {
+            to_remove.push(*tx.id());
+        });

         for id in to_remove {
             if let Some(tx) = self.remove_transaction(&id) {
diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs
index 9bd1d092b4f..317066137da 100644
--- a/crates/transaction-pool/src/pool/pending.rs
+++ b/crates/transaction-pool/src/pool/pending.rs
@@ -921,8 +921,7 @@ mod tests {
         assert!(removed.is_empty());

         // Verify that retrieving transactions from an empty pool yields nothing
-        let all_txs: Vec<_> = pool.all().collect();
-        assert!(all_txs.is_empty());
+        assert!(pool.all().next().is_none());
     }

     #[test]
diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml
index c9f625a1500..b4463d9ede3 100644
---
a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -36,6 +36,7 @@ derive_more.workspace = true rayon.workspace = true itertools.workspace = true tokio = { workspace = true, features = ["rt-multi-thread"] } +crossbeam-channel.workspace = true # `metrics` feature reth-metrics = { workspace = true, optional = true } diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index d6e1b57ed9b..ffa7aa4dc31 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,39 +1,19 @@ use crate::{ metrics::ParallelTrieMetrics, - proof_task::{ProofTaskKind, ProofTaskManagerHandle, StorageProofInput}, + proof_task::{AccountMultiproofInput, ProofWorkerHandle, StorageProofInput}, root::ParallelStateRootError, - stats::ParallelTrieTracker, StorageRootTargets, }; -use alloy_primitives::{ - map::{B256Map, B256Set, HashMap}, - B256, -}; -use alloy_rlp::{BufMut, Encodable}; +use alloy_primitives::{map::B256Set, B256}; use dashmap::DashMap; -use itertools::Itertools; use reth_execution_errors::StorageRootError; -use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, - ProviderError, -}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, - node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSetsMut}, - proof::StorageProof, - trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets, TriePrefixSetsMut}, updates::TrieUpdatesSorted, - walker::TrieWalker, - DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, - MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, -}; -use reth_trie_common::{ - added_removed_keys::MultiAddedRemovedKeys, - proof::{DecodedProofNodes, ProofRetainer}, + DecodedMultiProof, DecodedStorageMultiProof, HashedPostStateSorted, MultiProofTargets, Nibbles, }; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use reth_trie_common::added_removed_keys::MultiAddedRemovedKeys; use std::sync::{mpsc::Receiver, Arc}; use tracing::trace; @@ -42,9 +22,7 @@ use tracing::trace; /// This can collect proof for many targets in parallel, spawning a task for each hashed address /// that has proof targets. #[derive(Debug)] -pub struct ParallelProof { - /// Consistent view of the database. - view: ConsistentDbView, +pub struct ParallelProof { /// The sorted collection of cached in-memory intermediate trie nodes that /// can be reused for computation. pub nodes_sorted: Arc, @@ -58,8 +36,8 @@ pub struct ParallelProof { collect_branch_node_masks: bool, /// Provided by the user to give the necessary context to retain extra proofs. multi_added_removed_keys: Option>, - /// Handle to the storage proof task. - storage_proof_task_handle: ProofTaskManagerHandle>, + /// Handle to the proof worker pools. + proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps /// hashed (missed) addresses to their storage proof roots. missed_leaves_storage_roots: Arc>, @@ -67,25 +45,23 @@ pub struct ParallelProof { metrics: ParallelTrieMetrics, } -impl ParallelProof { +impl ParallelProof { /// Create new state proof generator. 
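Before the constructor itself, a hedged construction sketch of the new shape, mirroring the updated test at the bottom of this file (the `reth_trie_parallel` import paths, the `ConsistentDbView::new(factory, None)` signature, and the 1/1 worker counts are assumptions, not confirmed by this diff):

```rust
use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory};
use reth_trie_parallel::{
    proof::ParallelProof,
    proof_task::{ProofTaskCtx, ProofWorkerHandle},
};
use tokio::runtime::Runtime;

// ParallelProof is now built from a ProofWorkerHandle instead of owning a db view.
fn main() {
    let factory = create_test_provider_factory();
    let consistent_view = ConsistentDbView::new(factory, None); // assumed signature
    let rt = Runtime::new().unwrap();
    let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default());
    // One storage worker and one account worker, as in the test below.
    let handle = ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1);
    let _proof = ParallelProof::new(
        Default::default(), // nodes_sorted
        Default::default(), // state_sorted
        Default::default(), // prefix_sets
        Default::default(), // missed_leaves_storage_roots
        handle,
    );
}
```

The constructor follows: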
pub fn new( - view: ConsistentDbView, nodes_sorted: Arc, state_sorted: Arc, prefix_sets: Arc, missed_leaves_storage_roots: Arc>, - storage_proof_task_handle: ProofTaskManagerHandle>, + proof_worker_handle: ProofWorkerHandle, ) -> Self { Self { - view, nodes_sorted, state_sorted, prefix_sets, missed_leaves_storage_roots, collect_branch_node_masks: false, multi_added_removed_keys: None, - storage_proof_task_handle, + proof_worker_handle, #[cfg(feature = "metrics")] metrics: ParallelTrieMetrics::new_with_labels(&[("type", "proof")]), } @@ -106,19 +82,16 @@ impl ParallelProof { self.multi_added_removed_keys = multi_added_removed_keys; self } -} - -impl ParallelProof -where - Factory: DatabaseProviderFactory + Clone + 'static, -{ /// Queues a storage proof task and returns a receiver for the result. fn queue_storage_proof( &self, hashed_address: B256, prefix_set: PrefixSet, target_slots: B256Set, - ) -> Receiver> { + ) -> Result< + Receiver>, + ParallelStateRootError, + > { let input = StorageProofInput::new( hashed_address, prefix_set, @@ -127,10 +100,9 @@ where self.multi_added_removed_keys.clone(), ); - let (sender, receiver) = std::sync::mpsc::channel(); - let _ = - self.storage_proof_task_handle.queue_task(ProofTaskKind::StorageProof(input, sender)); - receiver + self.proof_worker_handle + .queue_storage_proof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string())) } /// Generate a storage multiproof according to the specified targets and hashed address. @@ -150,7 +122,7 @@ where "Starting storage proof generation" ); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); + let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots)?; let proof_result = receiver.recv().map_err(|_| { ParallelStateRootError::StorageRoot(StorageRootError::Database(DatabaseError::Other( format!("channel closed for {hashed_address}"), @@ -167,16 +139,16 @@ where proof_result } - /// Generate a state multiproof according to specified targets. - pub fn decoded_multiproof( - self, - targets: MultiProofTargets, - ) -> Result { - let mut tracker = ParallelTrieTracker::default(); - - // Extend prefix sets with targets - let mut prefix_sets = (*self.prefix_sets).clone(); - prefix_sets.extend(TriePrefixSetsMut { + /// Extends prefix sets with the given multiproof targets and returns the frozen result. + /// + /// This is a helper function used to prepare prefix sets before computing multiproofs. + /// Returns frozen (immutable) prefix sets ready for use in proof computation. + pub fn extend_prefix_sets_with_targets( + base_prefix_sets: &TriePrefixSetsMut, + targets: &MultiProofTargets, + ) -> TriePrefixSets { + let mut extended = base_prefix_sets.clone(); + extended.extend(TriePrefixSetsMut { account_prefix_set: PrefixSetMut::from(targets.keys().copied().map(Nibbles::unpack)), storage_prefix_sets: targets .iter() @@ -187,13 +159,21 @@ where .collect(), destroyed_accounts: Default::default(), }); - let prefix_sets = prefix_sets.freeze(); + extended.freeze() + } + + /// Generate a state multiproof according to specified targets. 
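The helper above is exercised first by `decoded_multiproof` below. A small hedged sketch of its unpack-then-freeze step (`PrefixSetMut`, `freeze`, `Nibbles::unpack`, and `PrefixSet::len` appear in this diff; the `repeat_byte` values are illustrative):

```rust
use alloy_primitives::B256;
use reth_trie::{prefix_set::PrefixSetMut, Nibbles};

fn main() {
    // Every targeted account hash becomes a nibble path in the account prefix set.
    let mut account_prefix_set = PrefixSetMut::default();
    account_prefix_set.insert(Nibbles::unpack(B256::repeat_byte(0x11)));
    account_prefix_set.insert(Nibbles::unpack(B256::repeat_byte(0x22)));
    // Freezing yields the immutable PrefixSet used during proof computation.
    let frozen = account_prefix_set.freeze();
    assert_eq!(frozen.len(), 2);
}
```

The multiproof entry point follows: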
+ pub fn decoded_multiproof( + self, + targets: MultiProofTargets, + ) -> Result { + // Extend prefix sets with targets + let prefix_sets = Self::extend_prefix_sets_with_targets(&self.prefix_sets, &targets); - let storage_root_targets = StorageRootTargets::new( - prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), - prefix_sets.storage_prefix_sets.clone(), + let storage_root_targets_len = StorageRootTargets::count( + &prefix_sets.account_prefix_set, + &prefix_sets.storage_prefix_sets, ); - let storage_root_targets_len = storage_root_targets.len(); trace!( target: "trie::parallel_proof", @@ -201,150 +181,31 @@ where "Starting parallel proof generation" ); - // Pre-calculate storage roots for accounts which were changed. - tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - - // stores the receiver for the storage proof outcome for the hashed addresses - // this way we can lazily await the outcome when we iterate over the map - let mut storage_proof_receivers = - B256Map::with_capacity_and_hasher(storage_root_targets.len(), Default::default()); + // Queue account multiproof request to account worker pool - for (hashed_address, prefix_set) in - storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) - { - let target_slots = targets.get(&hashed_address).cloned().unwrap_or_default(); - let receiver = self.queue_storage_proof(hashed_address, prefix_set, target_slots); - - // store the receiver for that result with the hashed address so we can await this in - // place when we iterate over the trie - storage_proof_receivers.insert(hashed_address, receiver); - } - - let provider_ro = self.view.provider_ro()?; - let trie_cursor_factory = InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), - &self.nodes_sorted, - ); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &self.state_sorted, - ); + let input = AccountMultiproofInput { + targets, + prefix_sets, + collect_branch_node_masks: self.collect_branch_node_masks, + multi_added_removed_keys: self.multi_added_removed_keys.clone(), + missed_leaves_storage_roots: self.missed_leaves_storage_roots.clone(), + }; - let accounts_added_removed_keys = - self.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); + let receiver = self + .proof_worker_handle + .dispatch_account_multiproof(input) + .map_err(|e| ParallelStateRootError::Other(e.to_string()))?; - // Create the walker. - let walker = TrieWalker::<_>::state_trie( - trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, - prefix_sets.account_prefix_set, - ) - .with_added_removed_keys(accounts_added_removed_keys) - .with_deletions_retained(true); - - // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = targets - .keys() - .map(Nibbles::unpack) - .collect::() - .with_added_removed_keys(accounts_added_removed_keys); - let mut hash_builder = HashBuilder::default() - .with_proof_retainer(retainer) - .with_updates(self.collect_branch_node_masks); - - // Initialize all storage multiproofs as empty. - // Storage multiproofs for non empty tries will be overwritten if necessary. 
- let mut collected_decoded_storages: B256Map = - targets.keys().map(|key| (*key, DecodedStorageMultiProof::empty())).collect(); - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut account_node_iter = TrieNodeIter::state_trie( - walker, - hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, - ); - while let Some(account_node) = - account_node_iter.try_next().map_err(ProviderError::Database)? - { - match account_node { - TrieElement::Branch(node) => { - hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); - } - TrieElement::Leaf(hashed_address, account) => { - let root = match storage_proof_receivers.remove(&hashed_address) { - Some(rx) => { - let decoded_storage_multiproof = rx.recv().map_err(|e| { - ParallelStateRootError::StorageRoot(StorageRootError::Database( - DatabaseError::Other(format!( - "channel closed for {hashed_address}: {e}" - )), - )) - })??; - let root = decoded_storage_multiproof.root; - collected_decoded_storages - .insert(hashed_address, decoded_storage_multiproof); - root - } - // Since we do not store all intermediate nodes in the database, there might - // be a possibility of re-adding a non-modified leaf to the hash builder. - None => { - tracker.inc_missed_leaves(); - - match self.missed_leaves_storage_roots.entry(hashed_address) { - dashmap::Entry::Occupied(occ) => *occ.get(), - dashmap::Entry::Vacant(vac) => { - let root = StorageProof::new_hashed( - trie_cursor_factory.clone(), - hashed_cursor_factory.clone(), - hashed_address, - ) - .with_prefix_set_mut(Default::default()) - .storage_multiproof( - targets.get(&hashed_address).cloned().unwrap_or_default(), - ) - .map_err(|e| { - ParallelStateRootError::StorageRoot( - StorageRootError::Database(DatabaseError::Other( - e.to_string(), - )), - ) - })? 
- .root; - vac.insert(root); - root - } - } - } - }; - - // Encode account - account_rlp.clear(); - let account = account.into_trie_account(root); - account.encode(&mut account_rlp as &mut dyn BufMut); - - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - } - } - } - let _ = hash_builder.root(); + // Wait for account multiproof result from worker + let (multiproof, stats) = receiver.recv().map_err(|_| { + ParallelStateRootError::Other( + "Account multiproof channel dropped: worker died or pool shutdown".to_string(), + ) + })??; - let stats = tracker.finish(); #[cfg(feature = "metrics")] self.metrics.record(stats); - let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); - let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - - let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { - let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); - ( - updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), - updated_branch_nodes - .into_iter() - .map(|(path, node)| (path, node.tree_mask)) - .collect(), - ) - } else { - (HashMap::default(), HashMap::default()) - }; - trace!( target: "trie::parallel_proof", total_targets = storage_root_targets_len, @@ -356,28 +217,26 @@ where "Calculated decoded proof" ); - Ok(DecodedMultiProof { - account_subtree: decoded_account_subtree, - branch_node_hash_masks, - branch_node_tree_masks, - storages: collected_decoded_storages, - }) + Ok(multiproof) } } #[cfg(test)] mod tests { use super::*; - use crate::proof_task::{ProofTaskCtx, ProofTaskManager}; + use crate::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use alloy_primitives::{ keccak256, - map::{B256Set, DefaultHashBuilder}, + map::{B256Set, DefaultHashBuilder, HashMap}, Address, U256, }; use rand::Rng; use reth_primitives_traits::{Account, StorageEntry}; - use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, + }; use reth_trie::proof::Proof; + use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use tokio::runtime::Runtime; #[test] @@ -447,21 +306,15 @@ mod tests { let task_ctx = ProofTaskCtx::new(Default::default(), Default::default(), Default::default()); - let proof_task = - ProofTaskManager::new(rt.handle().clone(), consistent_view.clone(), task_ctx, 1); - let proof_task_handle = proof_task.handle(); - - // keep the join handle around to make sure it does not return any errors - // after we compute the state root - let join_handle = rt.spawn_blocking(move || proof_task.run()); + let proof_worker_handle = + ProofWorkerHandle::new(rt.handle().clone(), consistent_view, task_ctx, 1, 1); let parallel_result = ParallelProof::new( - consistent_view, Default::default(), Default::default(), Default::default(), Default::default(), - proof_task_handle.clone(), + proof_worker_handle.clone(), ) .decoded_multiproof(targets.clone()) .unwrap(); @@ -489,9 +342,7 @@ mod tests { // then compare the entire thing for any mask differences assert_eq!(parallel_result, sequential_result_decoded); - // drop the handle to terminate the task and then block on the proof task handle to make - // sure it does not return any errors - drop(proof_task_handle); - rt.block_on(join_handle).unwrap().expect("The proof task should not return an error"); + // Workers shut down automatically when handle is dropped + 
drop(proof_worker_handle); } } diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 9bb96d4b19e..5c26f6d99c3 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -1,217 +1,624 @@ -//! A Task that manages sending proof requests to a number of tasks that have longer-running -//! database transactions. +//! Parallel proof computation using worker pools with dedicated database transactions. //! -//! The [`ProofTaskManager`] ensures that there are a max number of currently executing proof tasks, -//! and is responsible for managing the fixed number of database transactions created at the start -//! of the task. +//! +//! # Architecture +//! +//! - **Worker Pools**: Pre-spawned workers with dedicated database transactions +//! - Storage pool: Handles storage proofs and blinded storage node requests +//! - Account pool: Handles account multiproofs and blinded account node requests +//! - **Direct Channel Access**: [`ProofWorkerHandle`] provides type-safe queue methods with direct +//! access to worker channels, eliminating routing overhead +//! - **Automatic Shutdown**: Workers terminate gracefully when all handles are dropped //! //! Individual [`ProofTaskTx`] instances manage a dedicated [`InMemoryTrieCursorFactory`] and //! [`HashedPostStateCursorFactory`], which are each backed by a database transaction. -use crate::root::ParallelStateRootError; -use alloy_primitives::{map::B256Set, B256}; +use crate::{ + root::ParallelStateRootError, + stats::{ParallelTrieStats, ParallelTrieTracker}, + StorageRootTargets, +}; +use alloy_primitives::{ + map::{B256Map, B256Set}, + B256, +}; +use alloy_rlp::{BufMut, Encodable}; +use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; +use dashmap::DashMap; use reth_db_api::transaction::DbTx; -use reth_execution_errors::SparseTrieError; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_provider::{ - providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, FactoryTx, - ProviderResult, + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, }; +use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, - prefix_set::TriePrefixSetsMut, + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + node_iter::{TrieElement, TrieNodeIter}, + prefix_set::{TriePrefixSets, TriePrefixSetsMut}, proof::{ProofTrieNodeProviderFactory, StorageProof}, - trie_cursor::InMemoryTrieCursorFactory, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, - DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, + walker::TrieWalker, + DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostStateSorted, + MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::{ added_removed_keys::MultiAddedRemovedKeys, prefix_set::{PrefixSet, PrefixSetMut}, + proof::{DecodedProofNodes, ProofRetainer}, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ - collections::VecDeque, sync::{ - atomic::{AtomicUsize, Ordering}, - mpsc::{channel, Receiver, SendError, Sender}, + mpsc::{channel, Receiver, Sender}, Arc, }, time::Instant, }; use tokio::runtime::Handle; -use tracing::{debug, trace}; +use tracing::trace; #[cfg(feature = 
"metrics")] -use crate::proof_task_metrics::ProofTaskMetrics; +use crate::proof_task_metrics::ProofTaskTrieMetrics; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; +type AccountMultiproofResult = + Result<(DecodedMultiProof, ParallelTrieStats), ParallelStateRootError>; -/// A task that manages sending multiproof requests to a number of tasks that have longer-running -/// database transactions +/// Internal message for storage workers. #[derive(Debug)] -pub struct ProofTaskManager { - /// Max number of database transactions to create - max_concurrency: usize, - /// Number of database transactions created - total_transactions: usize, - /// Consistent view provider used for creating transactions on-demand +enum StorageWorkerJob { + /// Storage proof computation request + StorageProof { + /// Storage proof input parameters + input: StorageProofInput, + /// Channel to send result back to original caller + result_sender: Sender, + }, + /// Blinded storage node retrieval request + BlindedStorageNode { + /// Target account + account: B256, + /// Path to the storage node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, + }, +} + +/// Worker loop for storage trie operations. +/// +/// # Lifecycle +/// +/// Each worker: +/// 1. Receives `StorageWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) +/// +/// # Transaction Reuse +/// +/// Reuses the same transaction and cursor factories across multiple operations +/// to avoid transaction creation and cursor factory setup overhead. +/// +/// # Panic Safety +/// +/// If this function panics, the worker thread terminates but other workers +/// continue operating and the system degrades gracefully. +/// +/// # Shutdown +/// +/// Worker shuts down when the crossbeam channel closes (all senders dropped). +fn storage_worker_loop( view: ConsistentDbView, - /// Proof task context shared across all proof tasks task_ctx: ProofTaskCtx, - /// Proof tasks pending execution - pending_tasks: VecDeque, - /// The underlying handle from which to spawn proof tasks - executor: Handle, - /// The proof task transactions, containing owned cursor factories that are reused for proof - /// calculation. - proof_task_txs: Vec>>, - /// A receiver for new proof tasks. - proof_task_rx: Receiver>>, - /// A sender for sending back transactions. - tx_sender: Sender>>, - /// The number of active handles. - /// - /// Incremented in [`ProofTaskManagerHandle::new`] and decremented in - /// [`ProofTaskManagerHandle::drop`]. - active_handles: Arc, - /// Metrics tracking blinded node fetches. + work_rx: CrossbeamReceiver, + worker_id: usize, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, +) where + Factory: DatabaseProviderFactory, +{ + // Create db transaction before entering work loop + let provider = + view.provider_ro().expect("Storage worker failed to initialize: database unavailable"); + let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Storage worker started" + ); + + // Create factories once at worker startup to avoid recreation overhead. 
+ let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); + + // Create blinded provider factory once for all blinded node requests + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + proof_tx.task_ctx.prefix_sets.clone(), + ); + + let mut storage_proofs_processed = 0u64; + let mut storage_nodes_processed = 0u64; + + while let Ok(job) = work_rx.recv() { + match job { + StorageWorkerJob::StorageProof { input, result_sender } => { + let hashed_address = input.hashed_address; + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + prefix_set_len = input.prefix_set.len(), + target_slots = input.target_slots.len(), + "Processing storage proof" + ); + + let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof( + input, + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + ); + + let proof_elapsed = proof_start.elapsed(); + storage_proofs_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + storage_proofs_processed, + "Storage proof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + proof_time_us = proof_elapsed.as_micros(), + total_processed = storage_proofs_processed, + "Storage proof completed" + ); + } + + StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + "Processing blinded storage node" + ); + + let start = Instant::now(); + let result = + blinded_provider_factory.storage_node_provider(account).trie_node(&path); + let elapsed = start.elapsed(); + + storage_nodes_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + storage_nodes_processed, + "Blinded storage node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + elapsed_us = elapsed.as_micros(), + total_processed = storage_nodes_processed, + "Blinded storage node completed" + ); + } + } + } + + tracing::debug!( + target: "trie::proof_task", + worker_id, + storage_proofs_processed, + storage_nodes_processed, + "Storage worker shutting down" + ); + #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics, + metrics.record_storage_nodes(storage_nodes_processed as usize); } -impl ProofTaskManager { - /// Creates a new [`ProofTaskManager`] with the given max concurrency, creating that number of - /// cursor factories. - /// - /// Returns an error if the consistent view provider fails to create a read-only transaction. - pub fn new( - executor: Handle, - view: ConsistentDbView, - task_ctx: ProofTaskCtx, - max_concurrency: usize, - ) -> Self { - let (tx_sender, proof_task_rx) = channel(); - Self { - max_concurrency, - total_transactions: 0, - view, - task_ctx, - pending_tasks: VecDeque::new(), - executor, - proof_task_txs: Vec::new(), - proof_task_rx, - tx_sender, - active_handles: Arc::new(AtomicUsize::new(0)), - #[cfg(feature = "metrics")] - metrics: ProofTaskMetrics::default(), +/// Worker loop for account trie operations. +/// +/// # Lifecycle +/// +/// Each worker: +/// 1. Receives `AccountWorkerJob` from crossbeam unbounded channel +/// 2. Computes result using its dedicated long-lived transaction +/// 3. 
Sends result directly to original caller via `std::mpsc` +/// 4. Repeats until channel closes (graceful shutdown) +/// +/// # Transaction Reuse +/// +/// Reuses the same transaction and cursor factories across multiple operations +/// to avoid transaction creation and cursor factory setup overhead. +/// +/// # Panic Safety +/// +/// If this function panics, the worker thread terminates but other workers +/// continue operating and the system degrades gracefully. +/// +/// # Shutdown +/// +/// Worker shuts down when the crossbeam channel closes (all senders dropped). +fn account_worker_loop( + view: ConsistentDbView, + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + storage_work_tx: CrossbeamSender, + worker_id: usize, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, +) where + Factory: DatabaseProviderFactory, +{ + // Create db transaction before entering work loop + let provider = + view.provider_ro().expect("Account worker failed to initialize: database unavailable"); + let proof_tx = ProofTaskTx::new(provider.into_tx(), task_ctx, worker_id); + + tracing::debug!( + target: "trie::proof_task", + worker_id, + "Account worker started" + ); + + // Create factories once at worker startup to avoid recreation overhead. + let (trie_cursor_factory, hashed_cursor_factory) = proof_tx.create_factories(); + + // Create blinded provider factory once for all blinded node requests + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + proof_tx.task_ctx.prefix_sets.clone(), + ); + + let mut account_proofs_processed = 0u64; + let mut account_nodes_processed = 0u64; + + while let Ok(job) = work_rx.recv() { + match job { + AccountWorkerJob::AccountMultiproof { mut input, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + targets = input.targets.len(), + "Processing account multiproof" + ); + + let proof_start = Instant::now(); + let mut tracker = ParallelTrieTracker::default(); + + let mut storage_prefix_sets = + std::mem::take(&mut input.prefix_sets.storage_prefix_sets); + + let storage_root_targets_len = StorageRootTargets::count( + &input.prefix_sets.account_prefix_set, + &storage_prefix_sets, + ); + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + + let storage_proof_receivers = match dispatch_storage_proofs( + &storage_work_tx, + &input.targets, + &mut storage_prefix_sets, + input.collect_branch_node_masks, + input.multi_added_removed_keys.as_ref(), + ) { + Ok(receivers) => receivers, + Err(error) => { + let _ = result_sender.send(Err(error)); + continue; + } + }; + + // Use the missed leaves cache passed from the multiproof manager + let missed_leaves_storage_roots = &input.missed_leaves_storage_roots; + + let account_prefix_set = std::mem::take(&mut input.prefix_sets.account_prefix_set); + + let ctx = AccountMultiproofParams { + targets: &input.targets, + prefix_set: account_prefix_set, + collect_branch_node_masks: input.collect_branch_node_masks, + multi_added_removed_keys: input.multi_added_removed_keys.as_ref(), + storage_proof_receivers, + missed_leaves_storage_roots, + }; + + let result = build_account_multiproof_with_storage_roots( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + ctx, + &mut tracker, + ); + + let proof_elapsed = proof_start.elapsed(); + let stats = tracker.finish(); + let result = result.map(|proof| (proof, stats)); + account_proofs_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + 
target: "trie::proof_task", + worker_id, + account_proofs_processed, + "Account multiproof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + proof_time_us = proof_elapsed.as_micros(), + total_processed = account_proofs_processed, + "Account multiproof completed" + ); + } + + AccountWorkerJob::BlindedAccountNode { path, result_sender } => { + trace!( + target: "trie::proof_task", + worker_id, + ?path, + "Processing blinded account node" + ); + + let start = Instant::now(); + let result = blinded_provider_factory.account_node_provider().trie_node(&path); + let elapsed = start.elapsed(); + + account_nodes_processed += 1; + + if result_sender.send(result).is_err() { + tracing::debug!( + target: "trie::proof_task", + worker_id, + ?path, + account_nodes_processed, + "Blinded account node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?path, + node_time_us = elapsed.as_micros(), + total_processed = account_nodes_processed, + "Blinded account node completed" + ); + } } } - /// Returns a handle for sending new proof tasks to the [`ProofTaskManager`]. - pub fn handle(&self) -> ProofTaskManagerHandle> { - ProofTaskManagerHandle::new(self.tx_sender.clone(), self.active_handles.clone()) - } + tracing::debug!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + account_nodes_processed, + "Account worker shutting down" + ); + + #[cfg(feature = "metrics")] + metrics.record_account_nodes(account_nodes_processed as usize); } -impl ProofTaskManager +/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. +/// +/// This is a helper function used by account workers to build the account subtree proof +/// while storage proofs are still being computed. Receivers are consumed only when needed, +/// enabling interleaved parallelism between account trie traversal and storage proof computation. +/// +/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. +fn build_account_multiproof_with_storage_roots( + trie_cursor_factory: C, + hashed_cursor_factory: H, + ctx: AccountMultiproofParams<'_>, + tracker: &mut ParallelTrieTracker, +) -> Result where - Factory: DatabaseProviderFactory + 'static, + C: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, { - /// Inserts the task into the pending tasks queue. - pub fn queue_proof_task(&mut self, task: ProofTaskKind) { - self.pending_tasks.push_back(task); - } + let accounts_added_removed_keys = + ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); + + // Create the walker. + let walker = TrieWalker::<_>::state_trie( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + ctx.prefix_set, + ) + .with_added_removed_keys(accounts_added_removed_keys) + .with_deletions_retained(true); + + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer = ctx + .targets + .keys() + .map(Nibbles::unpack) + .collect::() + .with_added_removed_keys(accounts_added_removed_keys); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(ctx.collect_branch_node_masks); + + // Initialize storage multiproofs map with pre-allocated capacity. + // Proofs will be inserted as they're consumed from receivers during trie walk. 
+ let mut collected_decoded_storages: B256Map = + B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); + let mut account_node_iter = TrieNodeIter::state_trie( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + ); + + let mut storage_proof_receivers = ctx.storage_proof_receivers; + + while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let root = match storage_proof_receivers.remove(&hashed_address) { + Some(receiver) => { + // Block on this specific storage proof receiver - enables interleaved + // parallelism + let proof = receiver.recv().map_err(|_| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address}" + )), + ), + ) + })??; + let root = proof.root; + collected_decoded_storages.insert(hashed_address, proof); + root + } + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves(); + + match ctx.missed_leaves_storage_roots.entry(hashed_address) { + dashmap::Entry::Occupied(occ) => *occ.get(), + dashmap::Entry::Vacant(vac) => { + let root = StorageProof::new_hashed( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + ctx.targets.get(&hashed_address).cloned().unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(e.to_string()), + ), + ) + })? + .root; + + vac.insert(root); + root + } + } + } + }; - /// Gets either the next available transaction, or creates a new one if all are in use and the - /// total number of transactions created is less than the max concurrency. - pub fn get_or_create_tx(&mut self) -> ProviderResult>>> { - if let Some(proof_task_tx) = self.proof_task_txs.pop() { - return Ok(Some(proof_task_tx)); - } + // Encode account + account_rlp.clear(); + let account = account.into_trie_account(root); + account.encode(&mut account_rlp as &mut dyn BufMut); - // if we can create a new tx within our concurrency limits, create one on-demand - if self.total_transactions < self.max_concurrency { - let provider_ro = self.view.provider_ro()?; - let tx = provider_ro.into_tx(); - self.total_transactions += 1; - return Ok(Some(ProofTaskTx::new(tx, self.task_ctx.clone(), self.total_transactions))); + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + } } + } - Ok(None) + // Consume remaining storage proof receivers for accounts not encountered during trie walk. + for (hashed_address, receiver) in storage_proof_receivers { + if let Ok(Ok(proof)) = receiver.recv() { + collected_decoded_storages.insert(hashed_address, proof); + } } - /// Spawns the next queued proof task on the executor with the given input, if there are any - /// transactions available. - /// - /// This will return an error if a transaction must be created on-demand and the consistent view - /// provider fails. 
- pub fn try_spawn_next(&mut self) -> ProviderResult<()> { - let Some(task) = self.pending_tasks.pop_front() else { return Ok(()) }; - - let Some(proof_task_tx) = self.get_or_create_tx()? else { - // if there are no txs available, requeue the proof task - self.pending_tasks.push_front(task); - return Ok(()) - }; - - let tx_sender = self.tx_sender.clone(); - self.executor.spawn_blocking(move || match task { - ProofTaskKind::StorageProof(input, sender) => { - proof_task_tx.storage_proof(input, sender, tx_sender); - } - ProofTaskKind::BlindedAccountNode(path, sender) => { - proof_task_tx.blinded_account_node(path, sender, tx_sender); - } - ProofTaskKind::BlindedStorageNode(account, path, sender) => { - proof_task_tx.blinded_storage_node(account, path, sender, tx_sender); - } - }); + let _ = hash_builder.root(); - Ok(()) - } + let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); + let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - /// Loops, managing the proof tasks, and sending new tasks to the executor. - pub fn run(mut self) -> ProviderResult<()> { - loop { - match self.proof_task_rx.recv() { - Ok(message) => match message { - ProofTaskMessage::QueueTask(task) => { - // Track metrics for blinded node requests - #[cfg(feature = "metrics")] - match &task { - ProofTaskKind::BlindedAccountNode(_, _) => { - self.metrics.account_nodes += 1; - } - ProofTaskKind::BlindedStorageNode(_, _, _) => { - self.metrics.storage_nodes += 1; - } - _ => {} - } - // queue the task - self.queue_proof_task(task) - } - ProofTaskMessage::Transaction(tx) => { - // return the transaction to the pool - self.proof_task_txs.push(tx); - } - ProofTaskMessage::Terminate => { - // Record metrics before terminating - #[cfg(feature = "metrics")] - self.metrics.record(); - return Ok(()) - } - }, - // All senders are disconnected, so we can terminate - // However this should never happen, as this struct stores a sender - Err(_) => return Ok(()), - }; - - // try spawning the next task - self.try_spawn_next()?; - } + let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), + updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), + ) + } else { + (Default::default(), Default::default()) + }; + + Ok(DecodedMultiProof { + account_subtree: decoded_account_subtree, + branch_node_hash_masks, + branch_node_tree_masks, + storages: collected_decoded_storages, + }) +} + +/// Queues storage proofs for all accounts in the targets and returns receivers. +/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. 
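A self-contained sketch of this dispatch-then-consume pattern, with std channels and a single thread standing in for the crossbeam pool and its workers:

```rust
use std::collections::HashMap;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

// Queue every job up front, keep one receiver per key, and only block on a
// receiver when its result is needed, so workers run in parallel with the caller.
fn dispatch(work_tx: &Sender<(u64, Sender<u64>)>, keys: &[u64]) -> HashMap<u64, Receiver<u64>> {
    let mut receivers = HashMap::new();
    for &k in keys {
        let (tx, rx) = channel();
        work_tx.send((k, tx)).expect("worker pool unavailable");
        receivers.insert(k, rx);
    }
    receivers
}

fn main() {
    let (work_tx, work_rx) = channel::<(u64, Sender<u64>)>();
    let worker = thread::spawn(move || {
        while let Ok((k, result_tx)) = work_rx.recv() {
            let _ = result_tx.send(k * k); // reply directly to the original caller
        }
    });
    let receivers = dispatch(&work_tx, &[1, 2, 3]);
    for (k, rx) in receivers {
        assert_eq!(rx.recv().unwrap(), k * k);
    }
    drop(work_tx); // close the channel so the worker loop exits
    worker.join().unwrap();
}
```

The actual dispatcher follows: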
+fn dispatch_storage_proofs( + storage_work_tx: &CrossbeamSender, + targets: &MultiProofTargets, + storage_prefix_sets: &mut B256Map, + with_branch_node_masks: bool, + multi_added_removed_keys: Option<&Arc>, +) -> Result>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(targets.len(), Default::default()); + + // Queue all storage proofs to worker pool + for (hashed_address, target_slots) in targets.iter() { + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + + // Always queue a storage proof so we obtain the storage root even when no slots are + // requested. + let input = StorageProofInput::new( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); + + let (sender, receiver) = channel(); + + // If queuing fails, propagate error up (no fallback) + storage_work_tx + .send(StorageWorkerJob::StorageProof { input, result_sender: sender }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {}: storage worker pool unavailable", + hashed_address + )) + })?; + + storage_proof_receivers.insert(*hashed_address, receiver); } + + Ok(storage_proof_receivers) } /// Type alias for the factory tuple returned by `create_factories` @@ -229,8 +636,7 @@ pub struct ProofTaskTx { /// Trie updates, prefix sets, and state updates task_ctx: ProofTaskCtx, - /// Identifier for the tx within the context of a single [`ProofTaskManager`], used only for - /// tracing. + /// Identifier for the worker within the worker pool, used only for tracing. id: usize, } @@ -246,6 +652,7 @@ impl ProofTaskTx where Tx: DbTx, { + #[inline] fn create_factories(&self) -> ProofFactories<'_, Tx> { let trie_cursor_factory = InMemoryTrieCursorFactory::new( DatabaseTrieCursorFactory::new(&self.tx), @@ -260,172 +667,70 @@ where (trie_cursor_factory, hashed_cursor_factory) } - /// Calculates a storage proof for the given hashed address, and desired prefix set. - fn storage_proof( - self, + /// Compute storage proof with pre-created factories. + /// + /// Accepts cursor factories as parameters to allow reuse across multiple proofs. + /// Used by storage workers in the worker pool to avoid factory recreation + /// overhead on each proof computation. + #[inline] + fn compute_storage_proof( + &self, input: StorageProofInput, - result_sender: Sender, - tx_sender: Sender>, - ) { - trace!( - target: "trie::proof_task", - hashed_address=?input.hashed_address, - "Starting storage proof task calculation" - ); + trie_cursor_factory: impl TrieCursorFactory, + hashed_cursor_factory: impl HashedCursorFactory, + ) -> StorageProofResult { + // Consume the input so we can move large collections (e.g. target slots) without cloning. 
+ let StorageProofInput { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } = input; - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let multi_added_removed_keys = input - .multi_added_removed_keys - .unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); - let added_removed_keys = multi_added_removed_keys.get_storage(&input.hashed_address); + // Get or create added/removed keys context + let multi_added_removed_keys = + multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); + let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); let span = tracing::trace_span!( target: "trie::proof_task", "Storage proof calculation", - hashed_address=?input.hashed_address, - // Add a unique id because we often have parallel storage proof calculations for the - // same hashed address, and we want to differentiate them during trace analysis. - span_id=self.id, + hashed_address = ?hashed_address, + worker_id = self.id, ); - let span_guard = span.enter(); + let _span_guard = span.enter(); - let target_slots_len = input.target_slots.len(); let proof_start = Instant::now(); - let raw_proof_result = StorageProof::new_hashed( - trie_cursor_factory, - hashed_cursor_factory, - input.hashed_address, - ) - .with_prefix_set_mut(PrefixSetMut::from(input.prefix_set.iter().copied())) - .with_branch_node_masks(input.with_branch_node_masks) - .with_added_removed_keys(added_removed_keys) - .storage_multiproof(input.target_slots) - .map_err(|e| ParallelStateRootError::Other(e.to_string())); - - drop(span_guard); + // Compute raw storage multiproof + let raw_proof_result = + StorageProof::new_hashed(trie_cursor_factory, hashed_cursor_factory, hashed_address) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) + .with_branch_node_masks(with_branch_node_masks) + .with_added_removed_keys(added_removed_keys) + .storage_multiproof(target_slots) + .map_err(|e| ParallelStateRootError::Other(e.to_string())); + // Decode proof into DecodedStorageMultiProof let decoded_result = raw_proof_result.and_then(|raw_proof| { raw_proof.try_into().map_err(|e: alloy_rlp::Error| { ParallelStateRootError::Other(format!( "Failed to decode storage proof for {}: {}", - input.hashed_address, e + hashed_address, e )) }) }); trace!( target: "trie::proof_task", - hashed_address=?input.hashed_address, - prefix_set = ?input.prefix_set.len(), - target_slots = ?target_slots_len, - proof_time = ?proof_start.elapsed(), - "Completed storage proof task calculation" + hashed_address = ?hashed_address, + proof_time_us = proof_start.elapsed().as_micros(), + worker_id = self.id, + "Completed storage proof calculation" ); - // send the result back - if let Err(error) = result_sender.send(decoded_result) { - debug!( - target: "trie::proof_task", - hashed_address = ?input.hashed_address, - ?error, - task_time = ?proof_start.elapsed(), - "Storage proof receiver is dropped, discarding the result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); - } - - /// Retrieves blinded account node by path. 
- fn blinded_account_node( - self, - path: Nibbles, - result_sender: Sender, - tx_sender: Sender>, - ) { - trace!( - target: "trie::proof_task", - ?path, - "Starting blinded account node retrieval" - ); - - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), - ); - - let start = Instant::now(); - let result = blinded_provider_factory.account_node_provider().trie_node(&path); - trace!( - target: "trie::proof_task", - ?path, - elapsed = ?start.elapsed(), - "Completed blinded account node retrieval" - ); - - if let Err(error) = result_sender.send(result) { - tracing::error!( - target: "trie::proof_task", - ?path, - ?error, - "Failed to send blinded account node result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); - } - - /// Retrieves blinded storage node of the given account by path. - fn blinded_storage_node( - self, - account: B256, - path: Nibbles, - result_sender: Sender, - tx_sender: Sender>, - ) { - trace!( - target: "trie::proof_task", - ?account, - ?path, - "Starting blinded storage node retrieval" - ); - - let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - - let blinded_provider_factory = ProofTrieNodeProviderFactory::new( - trie_cursor_factory, - hashed_cursor_factory, - self.task_ctx.prefix_sets.clone(), - ); - - let start = Instant::now(); - let result = blinded_provider_factory.storage_node_provider(account).trie_node(&path); - trace!( - target: "trie::proof_task", - ?account, - ?path, - elapsed = ?start.elapsed(), - "Completed blinded storage node retrieval" - ); - - if let Err(error) = result_sender.send(result) { - tracing::error!( - target: "trie::proof_task", - ?account, - ?path, - ?error, - "Failed to send blinded storage node result" - ); - } - - // send the tx back - let _ = tx_sender.send(ProofTaskMessage::Transaction(self)); + decoded_result } } @@ -464,6 +769,56 @@ impl StorageProofInput { } } +/// Input parameters for account multiproof computation. +#[derive(Debug, Clone)] +pub struct AccountMultiproofInput { + /// The targets for which to compute the multiproof. + pub targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + pub prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + pub collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + pub multi_added_removed_keys: Option>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + pub missed_leaves_storage_roots: Arc>, +} + +/// Parameters for building an account multiproof with pre-computed storage roots. +struct AccountMultiproofParams<'a> { + /// The targets for which to compute the multiproof. + targets: &'a MultiProofTargets, + /// The prefix set for the account trie walk. + prefix_set: PrefixSet, + /// Whether or not to collect branch node masks. + collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option<&'a Arc>, + /// Receivers for storage proofs being computed in parallel. + storage_proof_receivers: B256Map>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + missed_leaves_storage_roots: &'a DashMap, +} + +/// Internal message for account workers. 
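The account pool uses the same job-message shape as the storage pool above: each request embeds its own reply channel, so a worker answers the original caller directly and no router thread is needed. A tiny hedged sketch of that shape (the real variants carry proof inputs and trie paths):

```rust
use std::sync::mpsc::{channel, Sender};

enum Job {
    Square { n: u64, result_sender: Sender<u64> },
}

fn main() {
    let (reply_tx, reply_rx) = channel();
    let job = Job::Square { n: 7, result_sender: reply_tx };
    // A worker receives the job from its queue and replies in place.
    match job {
        Job::Square { n, result_sender } => {
            let _ = result_sender.send(n * n); // caller may be gone; ignore send errors
        }
    }
    assert_eq!(reply_rx.recv().unwrap(), 49);
}
```

The real job enum follows: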
+#[derive(Debug)] +enum AccountWorkerJob { + /// Account multiproof computation request + AccountMultiproof { + /// Account multiproof input parameters + input: AccountMultiproofInput, + /// Channel to send result back to original caller + result_sender: Sender, + }, + /// Blinded account node retrieval request + BlindedAccountNode { + /// Path to the account node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, + }, +} + /// Data used for initializing cursor factories that is shared across all storage proof instances. #[derive(Debug, Clone)] pub struct ProofTaskCtx { @@ -489,121 +844,272 @@ impl ProofTaskCtx { } } -/// Message used to communicate with [`ProofTaskManager`]. -#[derive(Debug)] -pub enum ProofTaskMessage { - /// A request to queue a proof task. - QueueTask(ProofTaskKind), - /// A returned database transaction. - Transaction(ProofTaskTx), - /// A request to terminate the proof task manager. - Terminate, -} - -/// Proof task kind. +/// A handle that provides type-safe access to proof worker pools. /// -/// When queueing a task using [`ProofTaskMessage::QueueTask`], this enum -/// specifies the type of proof task to be executed. -#[derive(Debug)] -pub enum ProofTaskKind { - /// A storage proof request. - StorageProof(StorageProofInput, Sender), - /// A blinded account node request. - BlindedAccountNode(Nibbles, Sender), - /// A blinded storage node request. - BlindedStorageNode(B256, Nibbles, Sender), +/// The handle stores direct senders to both storage and account worker pools, +/// eliminating the need for a routing thread. All handles share reference-counted +/// channels, and workers shut down gracefully when all handles are dropped. +#[derive(Debug, Clone)] +pub struct ProofWorkerHandle { + /// Direct sender to storage worker pool + storage_work_tx: CrossbeamSender, + /// Direct sender to account worker pool + account_work_tx: CrossbeamSender, } -/// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the -/// number of active handles went to zero. -#[derive(Debug)] -pub struct ProofTaskManagerHandle { - /// The sender for the proof task manager. - sender: Sender>, - /// The number of active handles. - active_handles: Arc, -} +impl ProofWorkerHandle { + /// Spawns storage and account worker pools with dedicated database transactions. + /// + /// Returns a handle for submitting proof tasks to the worker pools. + /// Workers run until the last handle is dropped. + /// + /// # Parameters + /// - `executor`: Tokio runtime handle for spawning blocking tasks + /// - `view`: Consistent database view for creating transactions + /// - `task_ctx`: Shared context with trie updates and prefix sets + /// - `storage_worker_count`: Number of storage workers to spawn + /// - `account_worker_count`: Number of account workers to spawn + pub fn new( + executor: Handle, + view: ConsistentDbView, + task_ctx: ProofTaskCtx, + storage_worker_count: usize, + account_worker_count: usize, + ) -> Self + where + Factory: DatabaseProviderFactory + Clone + 'static, + { + let (storage_work_tx, storage_work_rx) = unbounded::(); + let (account_work_tx, account_work_rx) = unbounded::(); + + tracing::debug!( + target: "trie::proof_task", + storage_worker_count, + account_worker_count, + "Spawning proof worker pools" + ); -impl ProofTaskManagerHandle { - /// Creates a new [`ProofTaskManagerHandle`] with the given sender. 
-impl<Tx> ProofTaskManagerHandle<Tx> {
-    /// Creates a new [`ProofTaskManagerHandle`] with the given sender.
-    pub fn new(sender: Sender<ProofTaskMessage<Tx>>, active_handles: Arc<AtomicUsize>) -> Self {
-        active_handles.fetch_add(1, Ordering::SeqCst);
-        Self { sender, active_handles }
+        // Spawn storage workers
+        for worker_id in 0..storage_worker_count {
+            let view_clone = view.clone();
+            let task_ctx_clone = task_ctx.clone();
+            let work_rx_clone = storage_work_rx.clone();
+
+            executor.spawn_blocking(move || {
+                #[cfg(feature = "metrics")]
+                let metrics = ProofTaskTrieMetrics::default();
+
+                storage_worker_loop(
+                    view_clone,
+                    task_ctx_clone,
+                    work_rx_clone,
+                    worker_id,
+                    #[cfg(feature = "metrics")]
+                    metrics,
+                )
+            });
+
+            tracing::debug!(
+                target: "trie::proof_task",
+                worker_id,
+                "Storage worker spawned successfully"
+            );
+        }
+
+        // Spawn account workers
+        for worker_id in 0..account_worker_count {
+            let view_clone = view.clone();
+            let task_ctx_clone = task_ctx.clone();
+            let work_rx_clone = account_work_rx.clone();
+            let storage_work_tx_clone = storage_work_tx.clone();
+
+            executor.spawn_blocking(move || {
+                #[cfg(feature = "metrics")]
+                let metrics = ProofTaskTrieMetrics::default();
+
+                account_worker_loop(
+                    view_clone,
+                    task_ctx_clone,
+                    work_rx_clone,
+                    storage_work_tx_clone,
+                    worker_id,
+                    #[cfg(feature = "metrics")]
+                    metrics,
+                )
+            });
+
+            tracing::debug!(
+                target: "trie::proof_task",
+                worker_id,
+                "Account worker spawned successfully"
+            );
+        }
+
+        Self::new_handle(storage_work_tx, account_work_tx)
     }
 
-    /// Queues a task to the proof task manager.
-    pub fn queue_task(&self, task: ProofTaskKind) -> Result<(), SendError<ProofTaskMessage<Tx>>> {
-        self.sender.send(ProofTaskMessage::QueueTask(task))
+    /// Creates a new [`ProofWorkerHandle`] with direct access to worker pools.
+    ///
+    /// This is an internal constructor used for creating handles.
+    const fn new_handle(
+        storage_work_tx: CrossbeamSender<StorageWorkerJob>,
+        account_work_tx: CrossbeamSender<AccountWorkerJob>,
+    ) -> Self {
+        Self { storage_work_tx, account_work_tx }
    }
 
-    /// Terminates the proof task manager.
-    pub fn terminate(&self) {
-        let _ = self.sender.send(ProofTaskMessage::Terminate);
+    /// Queue a storage proof computation
+    pub fn queue_storage_proof(
+        &self,
+        input: StorageProofInput,
+    ) -> Result<Receiver<StorageProofResult>, ProviderError> {
+        let (tx, rx) = channel();
+        self.storage_work_tx
+            .send(StorageWorkerJob::StorageProof { input, result_sender: tx })
+            .map_err(|_| {
+                ProviderError::other(std::io::Error::other("storage workers unavailable"))
+            })?;
+
+        Ok(rx)
     }
-}
 
-impl<Tx> Clone for ProofTaskManagerHandle<Tx> {
-    fn clone(&self) -> Self {
-        Self::new(self.sender.clone(), self.active_handles.clone())
+    /// Queue an account multiproof computation
+    pub fn dispatch_account_multiproof(
+        &self,
+        input: AccountMultiproofInput,
+    ) -> Result<Receiver<AccountMultiproofResult>, ProviderError> {
+        let (tx, rx) = channel();
+        self.account_work_tx
+            .send(AccountWorkerJob::AccountMultiproof { input, result_sender: tx })
+            .map_err(|_| {
+                ProviderError::other(std::io::Error::other("account workers unavailable"))
+            })?;
+
+        Ok(rx)
     }
-}
 
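As a usage sketch (not part of the diff): once the pools are up, callers enqueue work and block on the returned receiver. Everything here besides the two handle methods shown above is an illustrative assumption:

```rust
// Illustrative round trip through the storage pool; `handle` and `input` are
// assumed to come from the surrounding engine code.
fn storage_proof_roundtrip(
    handle: &ProofWorkerHandle,
    input: StorageProofInput,
) -> Result<StorageProofResult, ProviderError> {
    // Non-blocking enqueue onto the crossbeam channel feeding the pool.
    let rx = handle.queue_storage_proof(input)?;
    // Block this (non-async) thread until a storage worker replies.
    rx.recv().map_err(|err| ProviderError::other(std::io::Error::other(err)))
}
```

Note the shutdown story implied by the channel types: cloning the handle clones both senders, and once every clone is dropped the channels disconnect, so the worker loops observe the disconnect on their receive and exit without an explicit terminate message.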
-impl<Tx> Drop for ProofTaskManagerHandle<Tx> {
-    fn drop(&mut self) {
-        // Decrement the number of active handles and terminate the manager if it was the last
-        // handle.
-        if self.active_handles.fetch_sub(1, Ordering::SeqCst) == 1 {
-            self.terminate();
-        }
+    /// Internal: Queue blinded storage node request
+    fn queue_blinded_storage_node(
+        &self,
+        account: B256,
+        path: Nibbles,
+    ) -> Result<Receiver<TrieNodeProviderResult>, ProviderError> {
+        let (tx, rx) = channel();
+        self.storage_work_tx
+            .send(StorageWorkerJob::BlindedStorageNode { account, path, result_sender: tx })
+            .map_err(|_| {
+                ProviderError::other(std::io::Error::other("storage workers unavailable"))
+            })?;
+
+        Ok(rx)
+    }
+
+    /// Internal: Queue blinded account node request
+    fn queue_blinded_account_node(
+        &self,
+        path: Nibbles,
+    ) -> Result<Receiver<TrieNodeProviderResult>, ProviderError> {
+        let (tx, rx) = channel();
+        self.account_work_tx
+            .send(AccountWorkerJob::BlindedAccountNode { path, result_sender: tx })
+            .map_err(|_| {
+                ProviderError::other(std::io::Error::other("account workers unavailable"))
+            })?;
+
+        Ok(rx)
     }
 }
 
-impl<Tx: DbTx> TrieNodeProviderFactory for ProofTaskManagerHandle<Tx> {
-    type AccountNodeProvider = ProofTaskTrieNodeProvider<Tx>;
-    type StorageNodeProvider = ProofTaskTrieNodeProvider<Tx>;
+impl TrieNodeProviderFactory for ProofWorkerHandle {
+    type AccountNodeProvider = ProofTaskTrieNodeProvider;
+    type StorageNodeProvider = ProofTaskTrieNodeProvider;
 
     fn account_node_provider(&self) -> Self::AccountNodeProvider {
-        ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() }
+        ProofTaskTrieNodeProvider::AccountNode { handle: self.clone() }
     }
 
     fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider {
-        ProofTaskTrieNodeProvider::StorageNode { account, sender: self.sender.clone() }
+        ProofTaskTrieNodeProvider::StorageNode { account, handle: self.clone() }
     }
 }
 
 /// Trie node provider for retrieving trie nodes by path.
 #[derive(Debug)]
-pub enum ProofTaskTrieNodeProvider<Tx> {
+pub enum ProofTaskTrieNodeProvider {
     /// Blinded account trie node provider.
     AccountNode {
-        /// Sender to the proof task.
-        sender: Sender<ProofTaskMessage<Tx>>,
+        /// Handle to the proof worker pools.
+        handle: ProofWorkerHandle,
     },
     /// Blinded storage trie node provider.
     StorageNode {
         /// Target account.
         account: B256,
-        /// Sender to the proof task.
-        sender: Sender<ProofTaskMessage<Tx>>,
+        /// Handle to the proof worker pools.
+        handle: ProofWorkerHandle,
     },
 }
 
-impl<Tx: DbTx> TrieNodeProvider for ProofTaskTrieNodeProvider<Tx> {
+impl TrieNodeProvider for ProofTaskTrieNodeProvider {
     fn trie_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
-        let (tx, rx) = channel();
         match self {
-            Self::AccountNode { sender } => {
-                let _ = sender.send(ProofTaskMessage::QueueTask(
-                    ProofTaskKind::BlindedAccountNode(*path, tx),
-                ));
+            Self::AccountNode { handle } => {
+                let rx = handle
+                    .queue_blinded_account_node(*path)
+                    .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?;
+                rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?
             }
-            Self::StorageNode { sender, account } => {
-                let _ = sender.send(ProofTaskMessage::QueueTask(
-                    ProofTaskKind::BlindedStorageNode(*account, *path, tx),
-                ));
+            Self::StorageNode { handle, account } => {
+                let rx = handle
+                    .queue_blinded_storage_node(*account, *path)
+                    .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?;
+                rx.recv().map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?
} } + } +} - rx.recv().unwrap() +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::map::B256Map; + use reth_provider::{providers::ConsistentDbView, test_utils::create_test_provider_factory}; + use reth_trie_common::{ + prefix_set::TriePrefixSetsMut, updates::TrieUpdatesSorted, HashedAccountsSorted, + HashedPostStateSorted, + }; + use std::sync::Arc; + use tokio::{runtime::Builder, task}; + + fn test_ctx() -> ProofTaskCtx { + ProofTaskCtx::new( + Arc::new(TrieUpdatesSorted::default()), + Arc::new(HashedPostStateSorted::new( + HashedAccountsSorted::default(), + B256Map::default(), + )), + Arc::new(TriePrefixSetsMut::default()), + ) + } + + /// Ensures `ProofWorkerHandle::new` spawns workers correctly. + #[test] + fn spawn_proof_workers_creates_handle() { + let runtime = Builder::new_multi_thread().worker_threads(1).enable_all().build().unwrap(); + runtime.block_on(async { + let handle = tokio::runtime::Handle::current(); + let factory = create_test_provider_factory(); + let view = ConsistentDbView::new(factory, None); + let ctx = test_ctx(); + + let proof_handle = ProofWorkerHandle::new(handle.clone(), view, ctx, 5, 3); + + // Verify handle can be cloned + let _cloned_handle = proof_handle.clone(); + + // Workers shut down automatically when handle is dropped + drop(proof_handle); + task::yield_now().await; + }); } } diff --git a/crates/trie/parallel/src/proof_task_metrics.rs b/crates/trie/parallel/src/proof_task_metrics.rs index cdb59d078d8..6492e28d12d 100644 --- a/crates/trie/parallel/src/proof_task_metrics.rs +++ b/crates/trie/parallel/src/proof_task_metrics.rs @@ -1,24 +1,5 @@ use reth_metrics::{metrics::Histogram, Metrics}; -/// Metrics for blinded node fetching for the duration of the proof task manager. -#[derive(Clone, Debug, Default)] -pub struct ProofTaskMetrics { - /// The actual metrics for blinded nodes. - pub task_metrics: ProofTaskTrieMetrics, - /// Count of blinded account node requests. - pub account_nodes: usize, - /// Count of blinded storage node requests. - pub storage_nodes: usize, -} - -impl ProofTaskMetrics { - /// Record the blinded node counts into the histograms. - pub fn record(&self) { - self.task_metrics.record_account_nodes(self.account_nodes); - self.task_metrics.record_storage_nodes(self.storage_nodes); - } -} - /// Metrics for the proof task. #[derive(Clone, Metrics)] #[metrics(scope = "trie.proof_task")] diff --git a/crates/trie/parallel/src/storage_root_targets.rs b/crates/trie/parallel/src/storage_root_targets.rs index f844b70fca5..0c6d9f43498 100644 --- a/crates/trie/parallel/src/storage_root_targets.rs +++ b/crates/trie/parallel/src/storage_root_targets.rs @@ -24,6 +24,23 @@ impl StorageRootTargets { .collect(), ) } + + /// Returns the total number of unique storage root targets without allocating new maps. 
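+    ///
+    /// This is the number of entries that [`Self::new`] would produce for the same
+    /// inputs, computed without building the intermediate map.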
+    pub fn count(
+        account_prefix_set: &PrefixSet,
+        storage_prefix_sets: &B256Map<PrefixSet>,
+    ) -> usize {
+        let mut count = storage_prefix_sets.len();
+
+        for nibbles in account_prefix_set {
+            let hashed_address = B256::from_slice(&nibbles.pack());
+            if !storage_prefix_sets.contains_key(&hashed_address) {
+                count += 1;
+            }
+        }
+
+        count
+    }
 }
 
 impl IntoIterator for StorageRootTargets {
diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs
index d973d705de2..50c9a79bd05 100644
--- a/crates/trie/sparse-parallel/src/trie.rs
+++ b/crates/trie/sparse-parallel/src/trie.rs
@@ -623,51 +623,19 @@ impl SparseTrieInterface for ParallelSparseTrie {
             "Branch node has only one child",
         );
 
-        let remaining_child_subtrie = self.subtrie_for_path_mut(&remaining_child_path);
-
         // If the remaining child node is not yet revealed then we have to reveal it here,
         // otherwise it's not possible to know how to collapse the branch.
-        let remaining_child_node =
-            match remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap() {
-                SparseNode::Hash(_) => {
-                    debug!(
-                        target: "trie::parallel_sparse",
-                        child_path = ?remaining_child_path,
-                        leaf_full_path = ?full_path,
-                        "Branch node child not revealed in remove_leaf, falling back to db",
-                    );
-                    if let Some(RevealedNode { node, tree_mask, hash_mask }) =
-                        provider.trie_node(&remaining_child_path)?
-                    {
-                        let decoded = TrieNode::decode(&mut &node[..])?;
-                        trace!(
-                            target: "trie::parallel_sparse",
-                            ?remaining_child_path,
-                            ?decoded,
-                            ?tree_mask,
-                            ?hash_mask,
-                            "Revealing remaining blinded branch child"
-                        );
-                        remaining_child_subtrie.reveal_node(
-                            remaining_child_path,
-                            &decoded,
-                            TrieMasks { hash_mask, tree_mask },
-                        )?;
-                        remaining_child_subtrie.nodes.get(&remaining_child_path).unwrap()
-                    } else {
-                        return Err(SparseTrieErrorKind::NodeNotFoundInProvider {
-                            path: remaining_child_path,
-                        }
-                        .into())
-                    }
-                }
-                node => node,
-            };
+        let remaining_child_node = self.reveal_remaining_child_on_leaf_removal(
+            provider,
+            full_path,
+            &remaining_child_path,
+            true, // recurse_into_extension
+        )?;
 
         let (new_branch_node, remove_child) = Self::branch_changes_on_leaf_removal(
             branch_path,
             &remaining_child_path,
-            remaining_child_node,
+            &remaining_child_node,
         );
 
         if remove_child {
@@ -1228,6 +1196,90 @@ impl ParallelSparseTrie {
         }
     }
 
+    /// Called when a leaf is removed on a branch which has only one other remaining child. That
+    /// child must be revealed in order to properly collapse the branch.
+    ///
+    /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its
+    /// child will be ensured to be revealed as well.
+    ///
+    /// ## Returns
+    ///
+    /// The node of the remaining child, whether it was already revealed or not.
+    fn reveal_remaining_child_on_leaf_removal<P: TrieNodeProvider>(
+        &mut self,
+        provider: P,
+        full_path: &Nibbles, // only needed for logs
+        remaining_child_path: &Nibbles,
+        recurse_into_extension: bool,
+    ) -> SparseTrieResult<SparseNode> {
+        let remaining_child_subtrie = self.subtrie_for_path_mut(remaining_child_path);
+
+        let remaining_child_node =
+            match remaining_child_subtrie.nodes.get(remaining_child_path).unwrap() {
+                SparseNode::Hash(_) => {
+                    debug!(
+                        target: "trie::parallel_sparse",
+                        child_path = ?remaining_child_path,
+                        leaf_full_path = ?full_path,
+                        "Node child not revealed in remove_leaf, falling back to db",
+                    );
+                    if let Some(RevealedNode { node, tree_mask, hash_mask }) =
+                        provider.trie_node(remaining_child_path)?
+ { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?remaining_child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + remaining_child_subtrie.reveal_node( + *remaining_child_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + remaining_child_subtrie.nodes.get(remaining_child_path).unwrap().clone() + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: *remaining_child_path, + } + .into()) + } + } + node => node.clone(), + }; + + // If `recurse_into_extension` is true, and the remaining child is an extension node, then + // its child will be ensured to be revealed as well. This is required for generation of + // trie updates; without revealing the grandchild branch it's not always possible to know + // if the tree mask bit should be set for the child extension on its parent branch. + if let SparseNode::Extension { key, .. } = &remaining_child_node && + recurse_into_extension + { + let mut remaining_grandchild_path = *remaining_child_path; + remaining_grandchild_path.extend(key); + + trace!( + target: "trie::parallel_sparse", + remaining_grandchild_path = ?remaining_grandchild_path, + child_path = ?remaining_child_path, + leaf_full_path = ?full_path, + "Revealing child of extension node, which is the last remaining child of the branch" + ); + + self.reveal_remaining_child_on_leaf_removal( + provider, + full_path, + &remaining_grandchild_path, + false, // recurse_into_extension + )?; + } + + Ok(remaining_child_node) + } + /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to /// the given `updates` set. If the given set is None then this is a no-op. fn apply_subtrie_update_actions( @@ -4076,6 +4128,185 @@ mod tests { ); } + #[test] + fn test_remove_leaf_remaining_extension_node_child_is_revealed() { + let branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]); + let removed_branch_path = Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2]); + + // Convert the logs into reveal_nodes call on a fresh ParallelSparseTrie + let nodes = vec![ + // Branch at 0x4f8807 + RevealedSparseNode { + path: branch_path, + node: { + TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::from(hex!( + "dede882d52f0e0eddfb5b89293a10c87468b4a73acd0d4ae550054a92353f6d5" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "8746f18e465e2eed16117306b6f2eef30bc9d2978aee4a7838255e39c41a3222" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "35a4ea861548af5f0262a9b6d619b4fc88fce6531cbd004eab1530a73f34bbb1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "47d5c2bf9eea5c1ee027e4740c2b86159074a27d52fd2f6a8a8c86c77e48006f" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "eb76a359b216e1d86b1f2803692a9fe8c3d3f97a9fe6a82b396e30344febc0c1" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "437656f2697f167b23e33cb94acc8550128cfd647fc1579d61e982cb7616b8bc" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "45a1ac2faf15ea8a4da6f921475974e0379f39c3d08166242255a567fa88ce6c" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "7dbb299d714d3dfa593f53bc1b8c66d5c401c30a0b5587b01254a56330361395" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "ae407eb14a74ed951c9949c1867fb9ee9ba5d5b7e03769eaf3f29c687d080429" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "768d0fe1003f0e85d3bc76e4a1fa0827f63b10ca9bca52d56c2b1cceb8eb8b08" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "e5127935143493d5094f4da6e4f7f5a0f62d524fbb61e7bb9fb63d8a166db0f3" + ))), + 
RlpNode::word_rlp(&B256::from(hex!( + "7f3698297308664fbc1b9e2c41d097fbd57d8f364c394f6ad7c71b10291fbf42" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "4a2bc7e19cec63cb5ef5754add0208959b50bcc79f13a22a370f77b277dbe6db" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "40764b8c48de59258e62a3371909a107e76e1b5e847cfa94dbc857e9fd205103" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "2985dca29a7616920d95c43ab62eb013a40e6a0c88c284471e4c3bd22f3b9b25" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "1b6511f7a385e79477239f7dd4a49f52082ecac05aa5bd0de18b1d55fe69d10c" + ))), + ], + TrieMask::new(0b1111111111111111), + )) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b1111111111111111)), + tree_mask: Some(TrieMask::new(0b0011110100100101)), + }, + }, + // Branch at 0x4f88072 + RevealedSparseNode { + path: removed_branch_path, + node: { + let stack = vec![ + RlpNode::word_rlp(&B256::from(hex!( + "15fd4993a41feff1af3b629b32572ab05acddd97c681d82ec2eb89c8a8e3ab9e" + ))), + RlpNode::word_rlp(&B256::from(hex!( + "a272b0b94ced4e6ec7adb41719850cf4a167ad8711d0dda6a810d129258a0d94" + ))), + ]; + let branch_node = BranchNode::new(stack, TrieMask::new(0b0001000000000100)); + TrieNode::Branch(branch_node) + }, + masks: TrieMasks { + hash_mask: Some(TrieMask::new(0b0000000000000000)), + tree_mask: Some(TrieMask::new(0b0000000000000100)), + }, + }, + // Extension at 0x4f880722 + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2]), + node: { + let extension_node = ExtensionNode::new( + Nibbles::from_nibbles([0x6]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + ); + TrieNode::Extension(extension_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + // Leaf at 0x4f88072c + RevealedSparseNode { + path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc]), + node: { + let leaf_node = LeafNode::new( + Nibbles::from_nibbles([ + 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, 0x0, 0x8, 0x8, 0xd, 0xf, + 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, 0xf, 0xa, + 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, + 0xd, 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]), + hex::decode("8468d3971d").unwrap(), + ); + TrieNode::Leaf(leaf_node) + }, + masks: TrieMasks { hash_mask: None, tree_mask: None }, + }, + ]; + + // Create a fresh ParallelSparseTrie + let mut trie = ParallelSparseTrie::from_root( + TrieNode::Extension(ExtensionNode::new( + Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7]), + RlpNode::word_rlp(&B256::from(hex!( + "56fab2b106a97eae9c7197f86d03bca292da6e0ac725b783082f7d950cc4e0fc" + ))), + )), + TrieMasks::none(), + true, + ) + .unwrap(); + + // Call reveal_nodes + trie.reveal_nodes(nodes).unwrap(); + + // Remove the leaf at "0x4f88072c077f86613088dfcae648abe831fadca55ad43ab165d1680dd567b5d6" + let leaf_key = Nibbles::from_nibbles([ + 0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc, 0x0, 0x7, 0x7, 0xf, 0x8, 0x6, 0x6, 0x1, 0x3, + 0x0, 0x8, 0x8, 0xd, 0xf, 0xc, 0xa, 0xe, 0x6, 0x4, 0x8, 0xa, 0xb, 0xe, 0x8, 0x3, 0x1, + 0xf, 0xa, 0xd, 0xc, 0xa, 0x5, 0x5, 0xa, 0xd, 0x4, 0x3, 0xa, 0xb, 0x1, 0x6, 0x5, 0xd, + 0x1, 0x6, 0x8, 0x0, 0xd, 0xd, 0x5, 0x6, 0x7, 0xb, 0x5, 0xd, 0x6, + ]); + + let mut provider = MockTrieNodeProvider::new(); + let revealed_branch = create_branch_node_with_children(&[], []); + let mut encoded = Vec::new(); + revealed_branch.encode(&mut encoded); + provider.add_revealed_node( + Nibbles::from_nibbles([0x4, 
0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2, 0x6]),
+            RevealedNode {
+                node: encoded.into(),
+                tree_mask: None,
+                // Give it a fake hashmask so that it appears like it will be stored in the db
+                hash_mask: Some(TrieMask::new(0b1111)),
+            },
+        );
+
+        trie.remove_leaf(&leaf_key, provider).unwrap();
+
+        // Calculate root so that updates are calculated.
+        trie.root();
+
+        // Take updates and assert they are correct
+        let updates = trie.take_updates();
+        assert_eq!(
+            updates.removed_nodes.into_iter().collect::<Vec<_>>(),
+            vec![removed_branch_path]
+        );
+        assert_eq!(updates.updated_nodes.len(), 1);
+        let updated_node = updates.updated_nodes.get(&branch_path).unwrap();
+
+        // Second bit must be set, indicating that the extension's child is in the db
+        assert_eq!(updated_node.tree_mask, TrieMask::new(0b011110100100101))
+    }
+
     #[test]
     fn test_parallel_sparse_trie_root() {
         // Step 1: Create the trie structure
@@ -4764,7 +4995,7 @@ mod tests {
             state.clone(),
             trie_cursor.account_trie_cursor().unwrap(),
             Default::default(),
-            state.keys().copied().collect::<Vec<_>>(),
+            state.keys().copied(),
         );
 
         // Write trie updates to the database
@@ -4809,7 +5040,7 @@ mod tests {
                 .iter()
                 .map(|nibbles| B256::from_slice(&nibbles.pack()))
                 .collect(),
-            state.keys().copied().collect::<Vec<_>>(),
+            state.keys().copied(),
         );
 
         // Write trie updates to the database
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index 76dadc8fc9c..89a23851e28 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -821,38 +821,17 @@ impl SparseTrieInterface for SerialSparseTrie {
 
                     trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child");
 
-                    if self.nodes.get(&child_path).unwrap().is_hash() {
-                        debug!(
-                            target: "trie::sparse",
-                            ?child_path,
-                            leaf_full_path = ?full_path,
-                            "Branch node child not revealed in remove_leaf, falling back to db",
-                        );
-                        if let Some(RevealedNode { node, tree_mask, hash_mask }) =
-                            provider.trie_node(&child_path)?
-                        {
-                            let decoded = TrieNode::decode(&mut &node[..])?;
-                            trace!(
-                                target: "trie::sparse",
-                                ?child_path,
-                                ?decoded,
-                                ?tree_mask,
-                                ?hash_mask,
-                                "Revealing remaining blinded branch child"
-                            );
-                            self.reveal_node(
-                                child_path,
-                                decoded,
-                                TrieMasks { hash_mask, tree_mask },
-                            )?;
-                        }
-                    }
-
-                    // Get the only child node.
-                    let child = self.nodes.get(&child_path).unwrap();
+                    // If the remaining child node is not yet revealed then we have to reveal
+                    // it here, otherwise it's not possible to know how to collapse the branch.
+                    let child = self.reveal_remaining_child_on_leaf_removal(
+                        &provider,
+                        full_path,
+                        &child_path,
+                        true, // recurse_into_extension
+                    )?;
 
                     let mut delete_child = false;
-                    let new_node = match child {
+                    let new_node = match &child {
                         SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()),
                         &SparseNode::Hash(hash) => {
                             return Err(SparseTrieErrorKind::BlindedNode {
@@ -1256,6 +1235,87 @@ impl SerialSparseTrie {
         }
         Ok(nodes)
     }
 
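A quick schematic of the case this refactor handles, matching the data in `test_remove_leaf_remaining_extension_node_child_is_revealed` above; the helper function at the end is a hypothetical illustration of the mask decision, not code from this patch:

```rust
// Before removal:                          After collapse:
//   Branch 0x4f8807                          Branch 0x4f8807
//   `-- 2: Branch 0x4f88072                  `-- 2: Extension (key [0x2, 0x6])
//       |-- 2: Extension 0x4f880722                 `-- child at 0x4f8807226
//       |      `-- child at 0x4f8807226                 (must be revealed)
//       `-- c: Leaf (removed)
//
// The parent's tree-mask bit for nibble 2 can only be decided once the
// grandchild at 0x4f8807226 is revealed: per the test above, a grandchild
// revealed with a hash mask counts as stored in the database.
fn parent_tree_mask_bit(grandchild_hash_mask: Option<TrieMask>) -> bool {
    grandchild_hash_mask.is_some()
}
```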
+    /// Called when a leaf is removed on a branch which has only one other remaining child. That
+    /// child must be revealed in order to properly collapse the branch.
+    ///
+    /// If `recurse_into_extension` is true, and the remaining child is an extension node, then its
+    /// child will be ensured to be revealed as well.
+    ///
+    /// ## Returns
+    ///
+    /// The node of the remaining child, whether it was already revealed or not.
+    fn reveal_remaining_child_on_leaf_removal<P: TrieNodeProvider>(
+        &mut self,
+        provider: P,
+        full_path: &Nibbles, // only needed for logs
+        remaining_child_path: &Nibbles,
+        recurse_into_extension: bool,
+    ) -> SparseTrieResult<SparseNode> {
+        let remaining_child_node = match self.nodes.get(remaining_child_path).unwrap() {
+            SparseNode::Hash(_) => {
+                debug!(
+                    target: "trie::sparse",
+                    child_path = ?remaining_child_path,
+                    leaf_full_path = ?full_path,
+                    "Node child not revealed in remove_leaf, falling back to db",
+                );
+                if let Some(RevealedNode { node, tree_mask, hash_mask }) =
+                    provider.trie_node(remaining_child_path)?
+                {
+                    let decoded = TrieNode::decode(&mut &node[..])?;
+                    trace!(
+                        target: "trie::sparse",
+                        ?remaining_child_path,
+                        ?decoded,
+                        ?tree_mask,
+                        ?hash_mask,
+                        "Revealing remaining blinded branch child"
+                    );
+                    self.reveal_node(
+                        *remaining_child_path,
+                        decoded,
+                        TrieMasks { hash_mask, tree_mask },
+                    )?;
+                    self.nodes.get(remaining_child_path).unwrap().clone()
+                } else {
+                    return Err(SparseTrieErrorKind::NodeNotFoundInProvider {
+                        path: *remaining_child_path,
+                    }
+                    .into())
+                }
+            }
+            node => node.clone(),
+        };
+
+        // If `recurse_into_extension` is true, and the remaining child is an extension node, then
+        // its child will be ensured to be revealed as well. This is required for generation of
+        // trie updates; without revealing the grandchild branch it's not always possible to know
+        // if the tree mask bit should be set for the child extension on its parent branch.
+        if let SparseNode::Extension { key, .. } = &remaining_child_node &&
+            recurse_into_extension
+        {
+            let mut remaining_grandchild_path = *remaining_child_path;
+            remaining_grandchild_path.extend(key);
+
+            trace!(
+                target: "trie::sparse",
+                remaining_grandchild_path = ?remaining_grandchild_path,
+                child_path = ?remaining_child_path,
+                leaf_full_path = ?full_path,
+                "Revealing child of extension node, which is the last remaining child of the branch"
+            );
+
+            self.reveal_remaining_child_on_leaf_removal(
+                provider,
+                full_path,
+                &remaining_grandchild_path,
+                false, // recurse_into_extension
+            )?;
+        }
+
+        Ok(remaining_child_node)
+    }
+
     /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified
     /// `depth`.
     ///
@@ -2971,7 +3031,7 @@ mod tests {
             state.clone(),
             trie_cursor.account_trie_cursor().unwrap(),
             Default::default(),
-            state.keys().copied().collect::<Vec<_>>(),
+            state.keys().copied(),
         );
 
         // Write trie updates to the database
@@ -3013,7 +3073,7 @@ mod tests {
                 .iter()
                 .map(|nibbles| B256::from_slice(&nibbles.pack()))
                 .collect(),
-            state.keys().copied().collect::<Vec<_>>(),
+            state.keys().copied(),
         );
 
         // Write trie updates to the database
diff --git a/crates/trie/trie/src/verify.rs b/crates/trie/trie/src/verify.rs
index 5f2260bc7dc..96059211458 100644
--- a/crates/trie/trie/src/verify.rs
+++ b/crates/trie/trie/src/verify.rs
@@ -400,9 +400,8 @@ impl Verifier {
             // need to validate that all accounts coming after it have empty storages.
             let prev_account = *prev_account;
 
-            // Calculate the max possible account address.
-            let mut max_account = B256::ZERO;
-            max_account.reverse();
+            // Calculate the max possible account address (all bits set).
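+            // (The previous code reversed `B256::ZERO`, which is still all zeros, so
+            // the upper bound used here was the minimum address, not the maximum.)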
+ let max_account = B256::from([0xFFu8; 32]); self.verify_empty_storages(prev_account, max_account, false, true)?; } diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 9a32d647876..5f0ccfca01f 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -113,4 +113,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index b449f118168..849f4ec5bab 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -99,4 +99,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index 2553a1480f9..3b28b43162a 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -164,4 +164,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index ba12fd1b2f5..13e2c2bd39d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -116,4 +116,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 79e324021bf..5c19682e8b6 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -108,4 +108,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index 843f5253c9a..0e5526affe5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -107,4 +107,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 3af272ff362..72c3108fcf3 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -110,4 +110,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index f440545f129..fadd0613ca8 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -143,4 +143,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index 64552318a21..0f9ddba9ee9 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -106,4 +106,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index c7fc831b764..942eda79998 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -108,4 +108,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 48fd6c889c6..b7ccf9e7d3d 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -116,4 +116,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index af21819a452..28d7c343e94 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -116,4 +116,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index cff6c7eed5e..3f9ac94c5c5 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -149,4 +149,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index 1dd3279a797..f6714898b35 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -103,4 +103,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index f5058265196..3a6bfae1d3c 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -106,4 +106,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 1f2c50908dc..a4939c3ef93 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -116,4 +116,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index a683749fcdf..7b3766b4e8a 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -103,4 +103,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 973dce74a22..74296538855 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -161,4 +161,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 6bc27381a24..a6dbbcb1b27 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -102,4 +102,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index 896f7f34d08..ee65abbeb42 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -167,4 +167,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index a783067d193..ae17ab91e0e 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -162,4 +162,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 0914444e108..f92b52ec591 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -163,4 +163,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 8c0cfa6e4d3..03d1e7b883b 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -186,4 +186,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index b1ac27e8ba7..993ae2dcd85 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -151,4 +151,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index fc9d6b317a6..8ac6156c664 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -39,7 +39,7 @@ Options: Print help (see a summary with '-h') Metrics: - --metrics + --metrics Enable Prometheus metrics. The metrics will be served at the given interface and port. @@ -248,6 +248,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + RPC: --http Enable the HTTP-RPC server @@ -864,6 +867,12 @@ Engine: --engine.allow-unwind-canonical-header Allow unwinding canonical header to ancestor during forkchoice updates. See `TreeConfig::unwind_canonical_header` for more details + --engine.storage-worker-count + Configure the number of storage proof workers in the Tokio blocking pool. If not specified, defaults to 2x available parallelism, clamped between 2 and 64 + + --engine.account-worker-count + Configure the number of account proof workers in the Tokio blocking pool. If not specified, defaults to the same count as storage workers + ERA: --era.enable Enable import from ERA1 files @@ -984,4 +993,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 6b24d9d326b..9693e20e756 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -100,4 +100,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index ecd6ccf8141..ae0f3d293d1 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -206,6 +206,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -317,4 +320,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 2a0a5b6a808..d1bf7c69870 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -111,4 +111,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index fee957e3385..9e542916d4c 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -206,6 +206,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -317,4 +320,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index dbd7ca91b34..75ab654964f 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -97,4 +97,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. 
+ + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index ac123d47285..7152b222fb4 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -97,4 +97,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index ce6bc399d8e..f54f6687805 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -151,4 +151,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index ec5e048b5cd..973ac79f29f 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -164,4 +164,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index bc693f7e463..f382eb2081e 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -100,4 +100,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index a36545638ce..e2ba5751b52 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -165,4 +165,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index 97211934295..01b4f61f29f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -158,4 +158,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index c1459ee5498..18f44ae13ed 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -115,4 +115,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index 4f39dccac12..de0f693ed57 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -115,4 +115,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
+ + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index f5d6a07b09a..aaff755796a 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -115,4 +115,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index fce03ffa753..2ff7b22b76b 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -115,4 +115,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index 76ce30a2f79..2af69a053d6 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -302,6 +302,9 @@ Networking: --required-block-hashes Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + Logging: --log.stdout.format The format to use for logs written to stdout @@ -383,4 +386,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. + + [default: TRACE] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 1a3fd02cae8..977d949a9b7 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -159,4 +159,21 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults to `http://localhost:4318/v1/traces`. + + Example: --tracing-otlp=http://collector:4318/v1/traces + + --tracing-otlp-level + Set the minimum log level for OTLP traces. + + Valid values: ERROR, WARN, INFO, DEBUG, TRACE + + Defaults to TRACE if not specified. 
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx
index bed98899e19..0b60467c413 100644
--- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx
+++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx
@@ -107,4 +107,21 @@ Display:
   -q, --quiet
           Silence all log output
 
+
+Tracing:
+      --tracing-otlp[=]
+          Enable `OpenTelemetry` tracing export to an OTLP endpoint.
+
+          If no value is provided, defaults to `http://localhost:4318/v1/traces`.
+
+          Example: --tracing-otlp=http://collector:4318/v1/traces
+
+      --tracing-otlp-level
+          Set the minimum log level for OTLP traces.
+
+          Valid values: ERROR, WARN, INFO, DEBUG, TRACE
+
+          Defaults to TRACE if not specified.
+
+          [default: TRACE]
 ```
\ No newline at end of file
diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx
index bcfc87cf3e5..07632cf8285 100644
--- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx
+++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx
@@ -107,4 +107,21 @@ Display:
   -q, --quiet
           Silence all log output
 
+
+Tracing:
+      --tracing-otlp[=]
+          Enable `OpenTelemetry` tracing export to an OTLP endpoint.
+
+          If no value is provided, defaults to `http://localhost:4318/v1/traces`.
+
+          Example: --tracing-otlp=http://collector:4318/v1/traces
+
+      --tracing-otlp-level
+          Set the minimum log level for OTLP traces.
+
+          Valid values: ERROR, WARN, INFO, DEBUG, TRACE
+
+          Defaults to TRACE if not specified.
+
+          [default: TRACE]
 ```
\ No newline at end of file
diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx
index 30ce967bb10..d6c73436098 100644
--- a/docs/vocs/docs/pages/run/monitoring.mdx
+++ b/docs/vocs/docs/pages/run/monitoring.mdx
@@ -10,6 +10,12 @@ Reth exposes a number of metrics which can be enabled by adding the `--metrics`
 reth node --metrics 127.0.0.1:9001
 ```
 
+Alternatively, you can export metrics to an OpenTelemetry collector using `--otlp-metrics`:
+
+```bash
+reth node --otlp-metrics 127.0.0.1:4318
+```
+
 Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time:
 
 ```bash
diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json
index 5b271d7ea8e..46a465ca4a4 100644
--- a/etc/grafana/dashboards/overview.json
+++ b/etc/grafana/dashboards/overview.json
@@ -3931,7 +3931,7 @@
           "hide": false,
           "includeNullMetadata": true,
           "instant": false,
-          "legendFormat": "Precompile cache hits",
+          "legendFormat": "{{address}}",
           "range": true,
           "refId": "A",
           "useBackend": false
diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs
index a72b2c44487..1d93226dd6a 100644
--- a/examples/custom-beacon-withdrawals/src/main.rs
+++ b/examples/custom-beacon-withdrawals/src/main.rs
@@ -8,7 +8,7 @@ use alloy_evm::{
     block::{BlockExecutorFactory, BlockExecutorFor, ExecutableTx},
     eth::{EthBlockExecutionCtx, EthBlockExecutor},
     precompiles::PrecompilesMap,
-    revm::context::result::ResultAndState,
+    revm::context::{result::ResultAndState, Block as _},
     EthEvm, EthEvmFactory,
 };
 use alloy_sol_macro::sol;
@@ -271,7 +271,7 @@ pub fn apply_withdrawals_contract_call(
 
     // Clean-up post system tx context
     state.remove(&SYSTEM_ADDRESS);
-    state.remove(&evm.block().beneficiary);
+    state.remove(&evm.block().beneficiary());
 
     evm.db_mut().commit(state);
 
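Editor's note: the `custom-beacon-withdrawals` change above reflects revm's move from a public `beneficiary` field to a `Block` trait accessor, which is why the `Block as _` import appears. A minimal standalone sketch of the same pattern, using plain `revm` and `alloy-primitives` rather than the example's reth types:

```rust
use alloy_primitives::Address;
use revm::context::{Block, BlockEnv}; // `Block` must be in scope for the accessor

fn main() {
    let block = BlockEnv { beneficiary: Address::ZERO, ..Default::default() };
    // Generic code sees the block through the `Block` trait, so the coinbase
    // is read via the `beneficiary()` method rather than the struct field.
    assert_eq!(block.beneficiary(), Address::ZERO);
}
```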
diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs
index f700cf9e89a..c5441a2b388 100644
--- a/examples/custom-dev-node/src/main.rs
+++ b/examples/custom-dev-node/src/main.rs
@@ -33,7 +33,7 @@ async fn main() -> eyre::Result<()> {
     let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
         .testing_node(tasks.executor())
         .node(EthereumNode::default())
-        .launch()
+        .launch_with_debug_capabilities()
         .await?;
 
     let mut notifications = node.provider.canonical_state_stream();
diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs
index b5e69670ec7..e32f0be6bd5 100644
--- a/examples/custom-evm/src/main.rs
+++ b/examples/custom-evm/src/main.rs
@@ -18,7 +18,7 @@ use reth_ethereum::{
     evm::{
         primitives::{Database, EvmEnv},
         revm::{
-            context::{Context, TxEnv},
+            context::{BlockEnv, Context, TxEnv},
             context_interface::result::{EVMError, HaltReason},
             inspector::{Inspector, NoOpInspector},
             interpreter::interpreter::EthInterpreter,
@@ -54,6 +54,7 @@ impl EvmFactory for MyEvmFactory {
     type HaltReason = HaltReason;
     type Context = EthEvmContext;
     type Spec = SpecId;
+    type BlockEnv = BlockEnv;
     type Precompiles = PrecompilesMap;
 
     fn create_evm(&self, db: DB, input: EvmEnv) -> Self::Evm {
diff --git a/examples/custom-node/src/evm/alloy.rs b/examples/custom-node/src/evm/alloy.rs
index 6071a2c6dd8..d8df842cfc5 100644
--- a/examples/custom-node/src/evm/alloy.rs
+++ b/examples/custom-node/src/evm/alloy.rs
@@ -40,6 +40,7 @@ where
     type Error = EVMError;
     type HaltReason = OpHaltReason;
     type Spec = OpSpecId;
+    type BlockEnv = BlockEnv;
     type Precompiles = P;
     type Inspector = I;
 
@@ -103,6 +104,7 @@ impl EvmFactory for CustomEvmFactory {
     type Error = EVMError;
     type HaltReason = OpHaltReason;
     type Spec = OpSpecId;
+    type BlockEnv = BlockEnv;
     type Precompiles = PrecompilesMap;
 
     fn create_evm(
diff --git a/examples/custom-node/src/evm/env.rs b/examples/custom-node/src/evm/env.rs
index 5508ec4e6d0..53a2b4e3f15 100644
--- a/examples/custom-node/src/evm/env.rs
+++ b/examples/custom-node/src/evm/env.rs
@@ -1,6 +1,7 @@
 use crate::primitives::{CustomTransaction, TxPayment};
 use alloy_eips::{eip2930::AccessList, Typed2718};
 use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv};
+use alloy_op_evm::block::OpTxEnv;
 use alloy_primitives::{Address, Bytes, TxKind, B256, U256};
 use op_alloy_consensus::OpTxEnvelope;
 use op_revm::OpTransaction;
@@ -328,3 +329,12 @@ impl IntoTxEnv for CustomTxEnv {
         self
     }
 }
+
+impl OpTxEnv for CustomTxEnv {
+    fn encoded_bytes(&self) -> Option<&Bytes> {
+        match self {
+            Self::Op(tx) => tx.encoded_bytes(),
+            Self::Payment(_) => None,
+        }
+    }
+}
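Editor's note: several factories above gain a `type BlockEnv` associated type; this is an upstream `alloy-evm` trait change, and each implementation simply names the block-environment type it already used. A toy, self-contained analog of that kind of migration (not the real `EvmFactory` trait):

```rust
// Toy stand-in for a factory trait that gained a `BlockEnv` associated type.
trait Factory {
    type BlockEnv: Default + std::fmt::Debug;
    fn create_block_env(&self) -> Self::BlockEnv;
}

#[derive(Default, Debug)]
struct BlockEnv {
    number: u64,
    gas_limit: u64,
}

struct MyFactory;

impl Factory for MyFactory {
    // The newly required associated type: existing impls add this one line.
    type BlockEnv = BlockEnv;

    fn create_block_env(&self) -> BlockEnv {
        BlockEnv { number: 0, gas_limit: 30_000_000 }
    }
}

fn main() {
    println!("{:?}", MyFactory.create_block_env());
}
```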
diff --git a/examples/exex-subscription/src/main.rs b/examples/exex-subscription/src/main.rs
index eb7ffaaf754..2f0c38f3852 100644
--- a/examples/exex-subscription/src/main.rs
+++ b/examples/exex-subscription/src/main.rs
@@ -4,7 +4,6 @@
 //! requested address.
 #[allow(dead_code)]
 use alloy_primitives::{Address, U256};
-use clap::Parser;
 use futures::TryStreamExt;
 use jsonrpsee::{
     core::SubscriptionResult, proc_macros::rpc, tracing, PendingSubscriptionSink,
@@ -166,14 +165,8 @@ async fn my_exex(
     Ok(())
 }
 
-#[derive(Parser, Debug)]
-struct Args {
-    #[arg(long)]
-    enable_ext: bool,
-}
-
 fn main() -> eyre::Result<()> {
-    reth_ethereum::cli::Cli::parse_args().run(|builder, _args| async move {
+    reth_ethereum::cli::Cli::parse_args().run(|builder, _| async move {
         let (subscriptions_tx, subscriptions_rx) = mpsc::unbounded_channel::();
 
         let rpc = StorageWatcherRpc::new(subscriptions_tx.clone());
diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs
index 3c7c9269f58..2af789a989c 100644
--- a/examples/node-custom-rpc/src/main.rs
+++ b/examples/node-custom-rpc/src/main.rs
@@ -53,7 +53,7 @@ fn main() {
             Ok(())
         })
         // launch the node with custom rpc
-        .launch()
+        .launch_with_debug_capabilities()
         .await?;
 
         handle.wait_for_node_exit().await
diff --git a/examples/precompile-cache/src/main.rs b/examples/precompile-cache/src/main.rs
index dcaa886d736..fe748db4636 100644
--- a/examples/precompile-cache/src/main.rs
+++ b/examples/precompile-cache/src/main.rs
@@ -16,7 +16,7 @@ use reth_ethereum::{
     evm::{
         primitives::{Database, EvmEnv},
         revm::{
-            context::{Context, TxEnv},
+            context::{BlockEnv, Context, TxEnv},
             context_interface::result::{EVMError, HaltReason},
             inspector::{Inspector, NoOpInspector},
             interpreter::interpreter::EthInterpreter,
@@ -69,6 +69,7 @@ impl EvmFactory for MyEvmFactory {
     type HaltReason = HaltReason;
     type Context = EthEvmContext;
     type Spec = SpecId;
+    type BlockEnv = BlockEnv;
     type Precompiles = PrecompilesMap;
 
     fn create_evm(&self, db: DB, input: EvmEnv) -> Self::Evm {
@@ -176,7 +177,7 @@ where
     async fn build_evm(self, ctx: &BuilderContext) -> eyre::Result {
         let evm_config = EthEvmConfig::new_with_evm_factory(
             ctx.chain_spec(),
-            MyEvmFactory { precompile_cache: self.precompile_cache.clone() },
+            MyEvmFactory { precompile_cache: self.precompile_cache },
         );
         Ok(evm_config)
     }
diff --git a/testing/ef-tests/tests/tests.rs b/testing/ef-tests/tests/tests.rs
index 0961817e901..2728246901a 100644
--- a/testing/ef-tests/tests/tests.rs
+++ b/testing/ef-tests/tests/tests.rs
@@ -93,7 +93,7 @@ macro_rules! blockchain_test {
                 .join("ethereum-tests")
                 .join("BlockchainTests");
 
-            BlockchainTests::new(suite_path).run_only(&format!("{}", stringify!($dir)));
+            BlockchainTests::new(suite_path).run_only(stringify!($dir));
         }
     };
 }
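Editor's note: the `ef-tests` tweak drops a redundant `format!` around `stringify!`: the macro already yields a `&'static str`, so the wrapper only added an allocation. A small runnable illustration (toy macro, not the real test harness):

```rust
// `stringify!` turns a token into a `&'static str` at compile time,
// so no `format!` round-trip (and no String allocation) is needed.
macro_rules! dir_name {
    ($dir:ident) => {
        stringify!($dir)
    };
}

fn main() {
    let direct: &'static str = dir_name!(GeneralStateTests);
    let formatted: String = format!("{}", dir_name!(GeneralStateTests)); // redundant allocation
    assert_eq!(direct, formatted);
    println!("{direct}");
}
```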