diff --git a/Cargo.lock b/Cargo.lock index 2c6e6bc1c..fc5d1ac4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1032,7 +1032,7 @@ checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "api-types" version = "0.1.0" -source = "git+https://github.com/Galxe/gravity-aptos?rev=7b2d7949583169cc5c997856b4a0d17fec56ebf6#7b2d7949583169cc5c997856b4a0d17fec56ebf6" +source = "git+https://github.com/Galxe/gravity-aptos?rev=ee73eef#ee73eefa89cbd557820595511abbfc5a20c9fa8a" dependencies = [ "anyhow", "async-trait", @@ -9987,9 +9987,11 @@ dependencies = [ "eyre", "gravity-primitives", "gravity-storage", + "grevm", "hex", "metrics", "once_cell", + "parking_lot", "rand 0.9.2", "rayon", "reth-chain-state", @@ -14803,3 +14805,8 @@ dependencies = [ "cc", "pkg-config", ] + +[[patch.unused]] +name = "alloy-tx-macros" +version = "1.0.37" +source = "git+https://github.com/alloy-rs/alloy?tag=v1.0.37#8f6c1489f89e90649dac378008d8913e3b8a4c98" diff --git a/Cargo.toml b/Cargo.toml index ccec43e0b..fe6c9244e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -377,7 +377,7 @@ codegen-units = 1 [workspace.dependencies] # reth -gravity-api-types = { package = "api-types", git = "https://github.com/Galxe/gravity-aptos", rev = "7b2d7949583169cc5c997856b4a0d17fec56ebf6" } +gravity-api-types = { package = "api-types", git = "https://github.com/Galxe/gravity-aptos", rev = "ee73eef" } op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" } @@ -773,7 +773,8 @@ visibility = "0.1.1" walkdir = "2.3.3" vergen-git2 = "9.1.0" -# [patch.crates-io] +[patch.crates-io] +alloy-tx-macros = { git = "https://github.com/alloy-rs/alloy", tag = "v1.0.37" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } # alloy-eips = { git = 
"https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } diff --git a/crates/pipe-exec-layer-ext-v2/execute/Cargo.toml b/crates/pipe-exec-layer-ext-v2/execute/Cargo.toml index f578e4b61..b98fc5523 100644 --- a/crates/pipe-exec-layer-ext-v2/execute/Cargo.toml +++ b/crates/pipe-exec-layer-ext-v2/execute/Cargo.toml @@ -29,6 +29,7 @@ reth-rpc-eth-api.workspace = true revm-primitives.workspace = true gravity-storage.workspace = true gravity-primitives.workspace = true +grevm.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-eips.workspace = true @@ -43,6 +44,7 @@ revm.workspace = true # misc tracing.workspace = true +parking_lot.workspace = true reth-metrics.workspace = true metrics.workspace = true serde_json.workspace = true diff --git a/crates/pipe-exec-layer-ext-v2/execute/src/lib.rs b/crates/pipe-exec-layer-ext-v2/execute/src/lib.rs index 076574f4c..29cb2c294 100644 --- a/crates/pipe-exec-layer-ext-v2/execute/src/lib.rs +++ b/crates/pipe-exec-layer-ext-v2/execute/src/lib.rs @@ -2,6 +2,7 @@ #[macro_use] mod channel; mod metrics; +pub mod mint_precompile; pub mod onchain_config; use alloy_sol_types::SolEvent; @@ -23,11 +24,12 @@ use alloy_primitives::{ }; use alloy_rpc_types_eth::TransactionRequest; use gravity_primitives::get_gravity_config; +use grevm::ParallelState; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use reth_chain_state::{ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_ethereum_primitives::{Block, BlockBody, Receipt, TransactionSigned}; -use reth_evm::{ConfigureEvm, NextBlockEnvAttributes, ParallelDatabase}; +use reth_evm::{ConfigureEvm, Evm, NextBlockEnvAttributes, ParallelDatabase}; use reth_evm_ethereum::EthEvmConfig; use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome}; use reth_pipe_exec_layer_event_bus::{ @@ -44,7 +46,7 @@ use reth_rpc_eth_api::RpcTypes; use revm::{ 
database::{states::bundle_state::BundleRetention, State}, state::AccountInfo, - DatabaseCommit, + Database, DatabaseCommit, }; use std::{ collections::BTreeMap, @@ -56,7 +58,7 @@ use std::{ }; use gravity_storage::GravityStorage; -use onchain_config::{transact_metadata_contract_call, OnchainConfigFetcher}; +use onchain_config::OnchainConfigFetcher; use reth_rpc_eth_api::helpers::EthCall; use reth_trie::{HashedPostState, KeccakKeyHasher}; use tokio::sync::{ @@ -65,12 +67,14 @@ use tokio::sync::{ }; use tracing::*; -use crate::onchain_config::{ - construct_validator_txns_envelope, - dkg::{convert_dkg_start_event_to_api, DKGStartEvent}, - observed_jwk::convert_into_api_provider_jwks, - types::ObservedJWKsUpdated, - SYSTEM_CALLER, +use crate::{ + mint_precompile::create_mint_token_precompile, + onchain_config::{ + construct_metadata_txn, construct_validator_txn_from_extra_data, + dkg::{convert_dkg_start_event_to_api, DKGStartEvent}, + types::DataRecorded, + transact_system_txn, SystemTxnResult, NATIVE_MINT_PRECOMPILE_ADDR, SYSTEM_CALLER, + }, }; /// Metadata about an executed block @@ -276,6 +280,21 @@ struct ExecuteOrderedBlockResult { epoch: u64, } +/// Result of system transaction execution +/// +/// System transactions may trigger an epoch change, which requires early return. +/// This enum represents both outcomes. 
+enum SystemTxnExecutionOutcome { + /// Normal execution completed, continue with block processing + Continue { + metadata_result: SystemTxnResult, + accumulated_state_changes: revm::state::EvmState, + validator_results: Vec, + }, + /// Epoch changed, return early with the result + EpochChanged(ExecuteOrderedBlockResult), +} + impl Core { fn epoch(&self) -> u64 { self.epoch.load(Ordering::Acquire) @@ -566,16 +585,215 @@ impl Core { (RecoveredBlock::new_unhashed(block, senders), txs_info) } + /// Execute all system transactions (metadata, DKG, JWK) sequentially + /// + /// This function encapsulates the execution of all system-level transactions + /// that must be processed before the parallel user transaction execution. + /// + /// Returns `SystemTxnExecutionOutcome::EpochChanged` if a new epoch was triggered, + /// otherwise returns `SystemTxnExecutionOutcome::Continue` with the results. + fn execute_system_transactions( + evm_config: &EthEvmConfig, + chain_spec: &ChainSpec, + state: &Arc, + evm_env: reth_evm::EvmEnv, + ordered_block: &OrderedBlock, + base_fee: u64, + epoch: u64, + block_id: B256, + parent_id: B256, + block_number: u64, + ) -> SystemTxnExecutionOutcome { + let mut inner_state = + State::builder().with_database_ref(state).with_bundle_update().build(); + let mut evm = evm_config.evm_with_env(&mut inner_state, evm_env); + + // Create shared state for precompile - keep a reference so we can extract changes later + let state_for_precompile = { + let report_db_metrics = get_gravity_config().report_db_metrics; + let parallel_state = ParallelState::new(state.clone(), true, report_db_metrics); + Arc::new(parking_lot::Mutex::new(parallel_state)) + }; + let state_for_precompile_ref = state_for_precompile.clone(); + let precompile = create_mint_token_precompile(state_for_precompile); + evm.precompiles_mut() + .apply_precompile(&NATIVE_MINT_PRECOMPILE_ADDR, move |_| Some(precompile)); + + // Get system caller nonce and gas price for constructing all system 
transactions + let system_call_account = + evm.db_mut().basic(SYSTEM_CALLER).unwrap().expect("SYSTEM_CALLER not exists"); + let gas_price = evm.block().basefee as u128; + let mut current_nonce = system_call_account.nonce; + // Construct and execute metadata transaction using unified entry point + let metadata_txn = construct_metadata_txn( + current_nonce, + gas_price, + ordered_block.timestamp_us, + ordered_block.proposer_index, + ); + current_nonce = metadata_txn.nonce() + 1; + + let (metadata_txn_result, metadata_state_changes) = + transact_system_txn(&mut evm, metadata_txn); + + // Commit metadata state changes immediately so subsequent txns see nonce update + evm.db_mut().commit(metadata_state_changes.clone()); + + // Accumulate state changes for returning to executor + let mut accumulated_state_changes = metadata_state_changes; + + // Check for epoch change + if let Some((new_epoch, validators)) = metadata_txn_result.emit_new_epoch() { + drop(evm); + assert_eq!(new_epoch, epoch + 1); + info!(target: "execute_ordered_block", + id=?block_id, + parent_id=?parent_id, + number=?block_number, + new_epoch=?new_epoch, + "emit new epoch, discard the block" + ); + inner_state.merge_transitions(BundleRetention::Reverts); + return SystemTxnExecutionOutcome::EpochChanged( + metadata_txn_result.into_executed_ordered_block_result( + chain_spec, + ordered_block, + base_fee, + inner_state.take_bundle(), + validators, + ), + ); + } + + debug!(target: "execute_ordered_block", + metadata_txn_result=?metadata_txn_result, + "metadata transaction result" + ); + + // Execute validator transactions (DKG and JWK) one by one + // DKG transactions are executed first since they may trigger epoch changes + let mut validator_txn_results: Vec = Vec::new(); + + // Sort extra_data: DKG first, then JWK + let mut sorted_extra_data: Vec<_> = ordered_block.extra_data.iter().collect(); + sorted_extra_data.sort_by_key(|data| match data { + ExtraDataType::DKG(_) => 0, // DKG comes first + 
ExtraDataType::JWK(_) => 1, // JWK comes second + }); + + for (index, extra_data) in sorted_extra_data.iter().enumerate() { + let is_dkg = matches!(extra_data, ExtraDataType::DKG(_)); + // TODO(gravity_lightman): should not panic + let txn = construct_validator_txn_from_extra_data(extra_data, current_nonce, gas_price) + .expect("Failed to construct validator transaction"); + current_nonce += 1; + + debug!(target: "execute_ordered_block", + index=?index, + nonce=?current_nonce, + is_dkg=?is_dkg, + block_number=?block_number, + "executing validator transaction one by one" + ); + + let (validator_result, validator_state_changes) = transact_system_txn(&mut evm, txn); + + // Commit state changes immediately so subsequent txns see nonce update + evm.db_mut().commit(validator_state_changes.clone()); + + // Merge state changes into accumulated changes + for (addr, account) in validator_state_changes { + accumulated_state_changes.insert(addr, account); + } + + // DKG transactions may trigger epoch change + if is_dkg { + if let Some((new_epoch, validators)) = validator_result.emit_new_epoch() { + drop(evm); + assert_eq!(new_epoch, epoch + 1); + info!(target: "execute_ordered_block", + id=?block_id, + parent_id=?parent_id, + number=?block_number, + new_epoch=?new_epoch, + "DKG triggered new epoch, discard the block" + ); + inner_state.merge_transitions(BundleRetention::Reverts); + return SystemTxnExecutionOutcome::EpochChanged( + validator_result.into_executed_ordered_block_result( + chain_spec, + ordered_block, + base_fee, + inner_state.take_bundle(), + validators, + ), + ); + } + } + + info!(target: "execute_ordered_block", + index=?index, + is_dkg=?is_dkg, + gas_used=?validator_result.result.gas_used(), + block_number=?block_number, + "validator transaction executed successfully" + ); + + validator_txn_results.push(validator_result); + } + + drop(evm); + + // Extract changes from precompile state and merge into accumulated_state_changes + { + let mut precompile_state = 
state_for_precompile_ref.lock(); + precompile_state.merge_transitions(BundleRetention::Reverts); + let precompile_bundle = precompile_state.take_bundle(); + + // Convert BundleState to EvmState and merge + for (address, account) in precompile_bundle.state { + if let Some(info) = account.info { + use revm::state::{Account, AccountStatus, EvmStorageSlot}; + accumulated_state_changes.insert( + address, + Account { + info, + storage: account + .storage + .into_iter() + .map(|(k, v)| (k, EvmStorageSlot::new(v.present_value, 0))) + .collect(), + status: AccountStatus::Touched, + transaction_id: 0, + }, + ); + } + } + } + + SystemTxnExecutionOutcome::Continue { + metadata_result: metadata_txn_result, + accumulated_state_changes, + validator_results: validator_txn_results, + } + } + /// Extract gravity events from execution receipts - /// Returns (gravity_events, epoch_change_result) where epoch_change_result is Some((new_epoch, - /// validators)) if epoch changed + /// Returns gravity_events containing DKG events and ObservedJWKsUpdated from DataRecorded events + /// TODO(gravity): Currently, it executes the entire block and then parses all logs from the whole block. + /// Theoretically, it could only parse metadata and validator transactions. 
fn extract_gravity_events_from_receipts( &self, receipts: &[Receipt], block_number: u64, - ) -> (Vec, Option<(u64, Bytes)>) { + epoch: u64, + ) -> Vec { + use std::collections::HashMap; + use gravity_api_types::on_chain_config::jwks::ProviderJWKs; + let mut gravity_events = vec![]; - let mut epoch_change_result = None; + // Map from (sourceType, sourceId) to latest nonce + let mut data_records: HashMap<(u32, alloy_primitives::U256), u128> = HashMap::new(); for receipt in receipts { debug!(target: "execute_ordered_block", @@ -584,35 +802,28 @@ impl Core { "extract gravity events from receipt" ); for log in &receipt.logs { - // Check for NewEpochEvent event (epoch change) - if let Ok(event) = crate::onchain_config::types::NewEpochEvent::decode_log(log) { - debug!(target: "execute_ordered_block", - number=?block_number, - new_epoch=?event.newEpoch, - "detected epoch change from NewEpochEvent" - ); - let validator_bytes = - crate::onchain_config::types::convert_active_validators_to_bcs( - &event.validatorSet, - ); - epoch_change_result = Some((event.newEpoch, validator_bytes)); - } - - if let Ok(event) = ObservedJWKsUpdated::decode_log(&log) { + // Parse DataRecorded events from NativeOracle + if let Ok(event) = DataRecorded::decode_log(&log) { info!(target: "execute_ordered_block", number=?block_number, - "observed jwks updated" + source_type=?event.sourceType, + source_id=?event.sourceId, + nonce=?event.nonce, + "data recorded event" ); - let api_jwks = event - .jwks - .iter() - .map(|jwk| convert_into_api_provider_jwks(jwk.clone())) - .collect::>(); - gravity_events.push(GravityEvent::ObservedJWKsUpdated( - event.epoch.try_into().unwrap(), - api_jwks, - )); + // Keep only the latest nonce for each (sourceType, sourceId) + let key = (event.sourceType, event.sourceId); + data_records + .entry(key) + .and_modify(|existing_nonce| { + if event.nonce > *existing_nonce { + *existing_nonce = event.nonce; + } + }) + .or_insert(event.nonce); } + + // Parse DKG events 
(unchanged) if let Ok(event) = DKGStartEvent::decode_log(&log) { info!(target: "execute_ordered_block", number=?block_number, @@ -623,7 +834,33 @@ impl Core { } } } - (gravity_events, epoch_change_result) + + // Convert collected DataRecorded events to ProviderJWKs + if !data_records.is_empty() { + let api_jwks: Vec = data_records + .into_iter() + .map(|((source_type, source_id), nonce)| { + // issuer format: "gravity://sourceType/sourceId" + let issuer = format!("gravity://{}/{}", source_type, source_id); + ProviderJWKs { + issuer: issuer.into_bytes(), + version: nonce as u64, // nonce as version + jwks: vec![], // return empty jwks + } + }) + .collect(); + + info!(target: "execute_ordered_block", + number=?block_number, + epoch=?epoch, + provider_count=?api_jwks.len(), + "constructed ProviderJWKs from DataRecorded events" + ); + + gravity_events.push(GravityEvent::ObservedJWKsUpdated(epoch, api_jwks)); + } + + gravity_events } fn execute_ordered_block( @@ -637,7 +874,7 @@ impl Core { assert_eq!(block_number, parent_header.number + 1); let epoch = ordered_block.epoch; - let state = self.storage.get_state_view().unwrap(); + let state = Arc::new(self.storage.get_state_view().unwrap()); let evm_env = self .evm_config @@ -658,69 +895,51 @@ impl Core { block_number=?block_number, ); let base_fee = evm_env.block_env.basefee; + // let mut evm = self.evm_config.evm_with_env(&mut state, evm_env); + // evm apply precompile + // for xx in xx { evm.execute_transaction(tx) } + // cases + + // Execute system transactions (metadata, DKG, JWK) sequentially + let (metadata_txn_result, accumulated_state_changes, validator_txn_results) = + match Self::execute_system_transactions( + &self.evm_config, + &self.chain_spec, + &state, + evm_env, + &ordered_block, + base_fee, + epoch, + block_id, + parent_id, + block_number, + ) { + SystemTxnExecutionOutcome::EpochChanged(result) => return result, + SystemTxnExecutionOutcome::Continue { + metadata_result, + accumulated_state_changes, + 
validator_results, + } => (metadata_result, accumulated_state_changes, validator_results), + }; - let (metadata_txn_result, state_changes) = { - let mut state = State::builder().with_database_ref(&state).with_bundle_update().build(); - let mut evm = self.evm_config.evm_with_env(&mut state, evm_env); - let (metadata_txn_result, state_changes) = transact_metadata_contract_call( - &mut evm, - ordered_block.timestamp_us, - ordered_block.proposer_index, - ); - drop(evm); - - if let Some((new_epoch, validators)) = metadata_txn_result.emit_new_epoch() { - // New epoch triggered, advance epoch and discard the block. - assert_eq!(new_epoch, epoch + 1); - info!(target: "execute_ordered_block", - id=?block_id, - parent_id=?parent_id, - number=?block_number, - new_epoch=?new_epoch, - "emit new epoch, discard the block" - ); - state.commit(state_changes); - state.merge_transitions(BundleRetention::Reverts); - return metadata_txn_result.into_executed_ordered_block_result( - &self.chain_spec, - &ordered_block, - base_fee, - state.take_bundle(), - validators, - ); - } - debug!(target: "execute_ordered_block", - metadata_txn_result=?metadata_txn_result, - "metadata transaction result" - ); - (metadata_txn_result, state_changes) - }; - - let validator_txns = if !ordered_block.extra_data.is_empty() { - construct_validator_txns_envelope( - &ordered_block.extra_data, - metadata_txn_result.txn.nonce(), - metadata_txn_result.txn.gas_price().expect("metadata txn gas price is not set"), - ) - .unwrap() - } else { - vec![] - }; - + // No longer pass validator_txns to create_block_for_executor since they are executed + // separately let (block, txs_info) = - self.create_block_for_executor(ordered_block, base_fee, &state, validator_txns); + self.create_block_for_executor(ordered_block, base_fee, &state, vec![]); info!(target: "execute_ordered_block", id=?block_id, parent_id=?parent_id, number=?block_number, num_txs=?block.transaction_count(), + 
validator_txn_count=?validator_txn_results.len(), "ready to execute block" ); let mut executor = self.evm_config.parallel_executor(state); - // Apply metadata transaction result to executor state - executor.commit_changes(state_changes); + // Apply all pre-executed transaction state changes (metadata + validator txns) to executor + // state + executor.commit_changes(accumulated_state_changes); let outcome = executor.execute(&block).unwrap_or_else(|err| { serde_json::to_writer_pretty( std::io::BufWriter::new(std::fs::File::create(format!("{block_id}.json")).unwrap()), @@ -747,34 +966,23 @@ impl Core { gravity_events: vec![], epoch, }; - metadata_txn_result.insert_to_executed_ordered_block_result(&mut result); + metadata_txn_result.insert_to_executed_ordered_block_result(&mut result, 0); + // Insert validator transaction results one by one after the metadata transaction + // Position 1 is right after the metadata transaction at position 0 + for (index, validator_result) in validator_txn_results.into_iter().enumerate() { + validator_result.insert_to_executed_ordered_block_result(&mut result, 1 + index); + } debug!(target: "execute_ordered_block", number=?result.block.number, receipts_len=?result.execution_output.receipts.len(), - "insert metadata transaction result to executed ordered block result" + "insert metadata and validator transaction results to executed ordered block result" ); - let (mut gravity_events, epoch_change_result) = self.extract_gravity_events_from_receipts( + let gravity_events = self.extract_gravity_events_from_receipts( &result.execution_output.receipts, result.block.number, + epoch, ); - // Check if any transaction (including JWK transactions) triggered a new epoch - // TODO(gravity_lightman): We need further more tests to test this branch - if let Some((new_epoch, validators)) = epoch_change_result { - // New epoch triggered, advance epoch and discard the block. 
- assert_eq!(new_epoch, epoch + 1); - info!(target: "execute_ordered_block", - id=?block_id, - parent_id=?parent_id, - number=?block_number, - new_epoch=?new_epoch, - "emit new epoch from transaction execution, discard the block" - ); - // Add NewEpoch event to gravity_events - gravity_events.push(GravityEvent::NewEpoch(new_epoch, validators.into())); - result.epoch = new_epoch; - } - result.gravity_events.extend(gravity_events); result } diff --git a/crates/pipe-exec-layer-ext-v2/execute/src/mint_precompile.rs b/crates/pipe-exec-layer-ext-v2/execute/src/mint_precompile.rs new file mode 100644 index 000000000..3b8c73449 --- /dev/null +++ b/crates/pipe-exec-layer-ext-v2/execute/src/mint_precompile.rs @@ -0,0 +1,156 @@ +//! Mint Token Precompile Contract +//! +//! This precompile allows authorized callers (JWK Manager) to mint tokens +//! directly to specified recipient addresses. + +use alloy_primitives::{address, map::HashMap, Address, Bytes, U256}; +use grevm::ParallelState; +use parking_lot::Mutex; +use reth_evm::{ + precompiles::{DynPrecompile, PrecompileInput}, + ParallelDatabase, +}; +use revm::precompile::{PrecompileError, PrecompileId, PrecompileOutput, PrecompileResult}; +use std::sync::Arc; +use tracing::{info, warn}; + +/// Authorized caller address (JWK Manager; NOTE(review): earlier docs said 0x2018, which does not match this constant — confirm the canonical JWK Manager address) +/// +/// Only this address is allowed to call the mint precompile. +pub const AUTHORIZED_CALLER: Address = address!("0x595475934ed7d9faa7fca28341c2ce583904a44e"); + +/// Function ID for mint operation +const FUNC_MINT: u8 = 0x01; + +/// Base gas cost for mint operation +const MINT_BASE_GAS: u64 = 21000; + +/// Creates a mint token precompile contract instance with state access. +/// +/// The precompile contract allows authorized callers to submit mint requests +/// and directly modifies the recipient's balance in the state.
+/// +/// # Arguments +/// +/// * `state` - Shared ParallelState wrapped in `Arc<Mutex<ParallelState>>` for thread-safe access +/// +/// # Returns +/// +/// A dynamic precompile that can be registered with the EVM +pub fn create_mint_token_precompile<DB: ParallelDatabase>( + state: Arc<Mutex<ParallelState<DB>>>, +) -> DynPrecompile { + let precompile_id = PrecompileId::custom("mint_token"); + + (precompile_id, move |input: PrecompileInput<'_>| -> PrecompileResult { + mint_token_handler(input, state.clone()) + }) + .into() +} + +/// Mint Token handler function +/// +/// # Security +/// +/// - Only the JWK Manager (`AUTHORIZED_CALLER`) is allowed to call this precompile +/// - Calls from other addresses will be rejected with an error +/// +/// # Parameter format (53 bytes) +/// +/// | Offset | Size | Description | +/// |--------|------|-------------| +/// | 0 | 1 | Function ID (0x01) | +/// | 1 | 20 | Recipient address | +/// | 21 | 32 | Amount (U256, big-endian) | +/// +/// # Errors +/// +/// - `Unauthorized caller` - Caller is not the authorized JWK Manager +/// - `Invalid input length` - Input data is less than 53 bytes +/// - `Invalid function ID` - Function ID is not 0x01 +/// - `Invalid or zero amount` - Amount is zero or exceeds u128::MAX +fn mint_token_handler<DB: ParallelDatabase>( + input: PrecompileInput<'_>, + state: Arc<Mutex<ParallelState<DB>>>, +) -> PrecompileResult { + // 1. Validate caller address + if input.caller != AUTHORIZED_CALLER { + warn!( + target: "evm::precompile::mint_token", + caller = ?input.caller, + authorized = ?AUTHORIZED_CALLER, + "Unauthorized caller" + ); + return Err(PrecompileError::Other("Unauthorized caller".into())); + } + + // 2. Parameter length check (1 + 20 + 32 = 53 bytes) + const EXPECTED_LEN: usize = 1 + 20 + 32; + if input.data.len() < EXPECTED_LEN { + warn!( + target: "evm::precompile::mint_token", + input_len = input.data.len(), + expected = EXPECTED_LEN, + "Invalid input length" + ); + return Err(PrecompileError::Other( + format!("Invalid input length: {}, expected {}", input.data.len(), EXPECTED_LEN).into(), + )); + } + + // 3. 
Parse and validate function ID + if input.data[0] != FUNC_MINT { + warn!( + target: "evm::precompile::mint_token", + func_id = input.data[0], + expected = FUNC_MINT, + "Invalid function ID" + ); + return Err(PrecompileError::Other( + format!("Invalid function ID: {:#x}, expected {:#x}", input.data[0], FUNC_MINT).into(), + )); + } + + // 4. Parse recipient address (bytes 1-20) + let recipient = Address::from_slice(&input.data[1..21]); + + // 5. Parse amount (bytes 21-52) + let amount_u256 = U256::from_be_slice(&input.data[21..53]); + let amount: u128 = amount_u256.try_into().map_err(|_| { + warn!( + target: "evm::precompile::mint_token", + ?recipient, + amount = ?amount_u256, + "Amount exceeds u128::MAX" + ); + PrecompileError::Other("Amount exceeds u128::MAX".into()) + })?; + + if amount == 0 { + warn!(target: "evm::precompile::mint_token", ?recipient, "Zero amount"); + return Err(PrecompileError::Other("Zero amount not allowed".into())); + } + + // 6. Execute mint operation + let mut state_guard = state.lock(); + if let Err(e) = state_guard.increment_balances(HashMap::from([(recipient, amount)])) { + warn!( + target: "evm::precompile::mint_token", + ?recipient, + amount, + error = ?e, + "Failed to increment balance" + ); + return Err(PrecompileError::Other("Failed to mint tokens".into())); + } + drop(state_guard); + + info!( + target: "evm::precompile::mint_token", + ?recipient, + amount, + "Minted tokens successfully" + ); + + Ok(PrecompileOutput { gas_used: MINT_BASE_GAS, bytes: Bytes::new(), reverted: false }) +} diff --git a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/errors.rs b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/errors.rs new file mode 100644 index 000000000..2538c0795 --- /dev/null +++ b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/errors.rs @@ -0,0 +1,344 @@ +//! System transaction error types and decoding +//! +//! This module provides error types and decoding logic for system transactions: +//! 
- Metadata transactions (onBlockStart) +//! - DKG transactions (finishTransition) +//! - JWK/Oracle transactions (NativeOracle.record) + +use alloy_primitives::Bytes; +use alloy_sol_macro::sol; +use alloy_sol_types::SolError; +use revm::context_interface::result::HaltReason; +use std::fmt; +use tracing::{error, warn}; + +// ============================================================================ +// Error Definitions from Solidity Contracts +// ============================================================================ + +sol! { + // -------------------- SystemAccessControl Errors -------------------- + /// @notice Caller is not authorized for the operation + error Unauthorized(); + + // -------------------- Timestamp Errors -------------------- + /// @notice Timestamp must advance for normal blocks + error TimestampMustAdvance(uint64 proposed, uint64 current); + + /// @notice Timestamp must equal current for NIL blocks + error TimestampMustEqual(uint64 proposed, uint64 current); + + // -------------------- Validator Errors -------------------- + /// @notice Validator index out of bounds + error ValidatorIndexOutOfBounds(uint64 index, uint64 total); + + // -------------------- Reconfiguration Errors -------------------- + /// @notice Reconfiguration is already in progress + error ReconfigurationInProgress(); + + /// @notice No reconfiguration in progress + error ReconfigurationNotInProgress(); + + /// @notice Reconfiguration contract has not been initialized + error ReconfigurationNotInitialized(); + + // -------------------- DKG Errors -------------------- + /// @notice DKG session is already in progress + error DKGInProgress(); + + /// @notice No DKG session is in progress + error DKGNotInProgress(); + + /// @notice DKG contract has not been initialized + error DKGNotInitialized(); + + // -------------------- NativeOracle Errors -------------------- + /// @notice Nonce must be strictly increasing for each source + error NonceNotIncreasing(uint32 sourceType, 
uint256 sourceId, uint128 currentNonce, uint128 providedNonce); + + /// @notice Batch arrays have mismatched lengths + error OracleBatchArrayLengthMismatch(uint256 noncesLength, uint256 payloadsLength, uint256 gasLimitsLength); + + // -------------------- JWKManager Errors (for reference, callback failures don't revert main tx) -------------------- + /// @notice JWK version must be strictly increasing + error JWKVersionNotIncreasing(bytes issuer, uint64 currentVersion, uint64 providedVersion); +} + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Severity of system transaction error +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ErrorSeverity { + /// Fatal error - should halt block production + Fatal, + /// Recoverable error - can skip this transaction and continue + Recoverable, +} + +/// Decoded system transaction error +#[derive(Debug, Clone)] +pub struct SystemTxnError { + /// Name of the error (from Solidity) + pub name: String, + /// Human-readable details + pub details: String, + /// Error severity + pub severity: ErrorSeverity, +} + +impl fmt::Display for SystemTxnError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "[{:?}] {}: {}", self.severity, self.name, self.details) + } +} + +/// Result of decoding a system transaction execution result +#[derive(Debug)] +pub enum SystemTxnExecutionResult { + /// Execution succeeded + Success, + /// Execution reverted with a known error + KnownError(SystemTxnError), + /// Execution reverted with an unknown error + UnknownRevert { output: Bytes }, + /// Execution halted (e.g., out of gas) + Halt { reason: HaltReason }, +} + +// ============================================================================ +// Error Decoding +// ============================================================================ + +/// Decode a revert output into a known error 
type +/// +/// Uses 4-byte selector matching for O(1) lookup, then decodes only the matched error. +pub fn decode_revert_error(output: &Bytes) -> Option { + // Need at least 4 bytes for selector + if output.len() < 4 { + return None; + } + + // Extract 4-byte selector + let selector: [u8; 4] = output[..4].try_into().ok()?; + + // Match selector and decode + match selector { + // -------------------- Fatal Errors -------------------- + s if s == Unauthorized::SELECTOR => Some(SystemTxnError { + name: "Unauthorized".into(), + details: "Caller is not authorized (should be SYSTEM_CALLER)".into(), + severity: ErrorSeverity::Fatal, + }), + + s if s == TimestampMustAdvance::SELECTOR => { + let err = TimestampMustAdvance::abi_decode(output).ok()?; + Some(SystemTxnError { + name: "TimestampMustAdvance".into(), + details: format!( + "Timestamp must increase: proposed={} <= current={}", + err.proposed, err.current + ), + severity: ErrorSeverity::Fatal, + }) + } + + s if s == TimestampMustEqual::SELECTOR => { + let err = TimestampMustEqual::abi_decode(output).ok()?; + Some(SystemTxnError { + name: "TimestampMustEqual".into(), + details: format!( + "NIL block timestamp mismatch: proposed={} != current={}", + err.proposed, err.current + ), + severity: ErrorSeverity::Fatal, + }) + } + + s if s == ValidatorIndexOutOfBounds::SELECTOR => { + let err = ValidatorIndexOutOfBounds::abi_decode(output).ok()?; + Some(SystemTxnError { + name: "ValidatorIndexOutOfBounds".into(), + details: format!("Proposer index {} >= total validators {}", err.index, err.total), + severity: ErrorSeverity::Fatal, + }) + } + + s if s == ReconfigurationNotInitialized::SELECTOR => Some(SystemTxnError { + name: "ReconfigurationNotInitialized".into(), + details: "Reconfiguration contract not initialized".into(), + severity: ErrorSeverity::Fatal, + }), + + s if s == DKGNotInitialized::SELECTOR => Some(SystemTxnError { + name: "DKGNotInitialized".into(), + details: "DKG contract not initialized".into(), + severity: 
ErrorSeverity::Fatal, + }), + + s if s == OracleBatchArrayLengthMismatch::SELECTOR => { + let err = OracleBatchArrayLengthMismatch::abi_decode(output).ok()?; + Some(SystemTxnError { + name: "OracleBatchArrayLengthMismatch".into(), + details: format!( + "Array length mismatch: nonces={}, payloads={}, gasLimits={}", + err.noncesLength, err.payloadsLength, err.gasLimitsLength + ), + severity: ErrorSeverity::Fatal, + }) + } + + // -------------------- Recoverable Errors -------------------- + s if s == ReconfigurationNotInProgress::SELECTOR => Some(SystemTxnError { + name: "ReconfigurationNotInProgress".into(), + details: "No epoch transition in progress (possibly already completed)".into(), + severity: ErrorSeverity::Recoverable, + }), + + s if s == ReconfigurationInProgress::SELECTOR => Some(SystemTxnError { + name: "ReconfigurationInProgress".into(), + details: "Epoch transition already in progress".into(), + severity: ErrorSeverity::Recoverable, + }), + + s if s == DKGNotInProgress::SELECTOR => Some(SystemTxnError { + name: "DKGNotInProgress".into(), + details: "No DKG session in progress (possibly already finished)".into(), + severity: ErrorSeverity::Recoverable, + }), + + s if s == DKGInProgress::SELECTOR => Some(SystemTxnError { + name: "DKGInProgress".into(), + details: "DKG session already in progress".into(), + severity: ErrorSeverity::Recoverable, + }), + + s if s == NonceNotIncreasing::SELECTOR => { + let err = NonceNotIncreasing::abi_decode(output).ok()?; + Some(SystemTxnError { + name: "NonceNotIncreasing".into(), + details: format!( + "Oracle nonce not increasing: sourceType={}, sourceId={}, current={}, provided={}", + err.sourceType, err.sourceId, err.currentNonce, err.providedNonce + ), + severity: ErrorSeverity::Recoverable, + }) + } + + // Unknown selector + _ => None, + } +} + +/// Analyze a system transaction execution result +pub fn analyze_execution_result( + result: &revm::context_interface::result::ExecutionResult, +) -> 
SystemTxnExecutionResult { + use revm::context_interface::result::ExecutionResult; + + match result { + ExecutionResult::Success { .. } => SystemTxnExecutionResult::Success, + ExecutionResult::Revert { output, .. } => { + if let Some(error) = decode_revert_error(output) { + SystemTxnExecutionResult::KnownError(error) + } else { + SystemTxnExecutionResult::UnknownRevert { output: output.clone() } + } + } + ExecutionResult::Halt { reason, .. } => { + SystemTxnExecutionResult::Halt { reason: reason.clone() } + } + } +} + +/// Log an execution error with appropriate severity +/// +/// This is the simplified public API for logging system transaction errors. +/// Call this when `result.is_success()` returns false. +pub fn log_execution_error(result: &revm::context_interface::result::ExecutionResult) { + use revm::context_interface::result::ExecutionResult; + + match result { + ExecutionResult::Success { .. } => { + // No error to log + } + ExecutionResult::Revert { output, .. } => { + if let Some(err) = decode_revert_error(output) { + match err.severity { + ErrorSeverity::Fatal => { + error!("[FATAL] System transaction failed: {} - {}", err.name, err.details); + } + ErrorSeverity::Recoverable => { + warn!( + "[RECOVERABLE] System transaction failed: {} - {}", + err.name, err.details + ); + } + } + } else { + error!("System transaction reverted with unknown error: 0x{}", hex::encode(output)); + } + } + ExecutionResult::Halt { reason, .. 
} => { + error!("System transaction halted: {:?}", reason); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_sol_types::SolError; + + #[test] + fn test_decode_timestamp_must_advance() { + let error = TimestampMustAdvance { proposed: 1000, current: 2000 }; + let encoded = error.abi_encode(); + let result = decode_revert_error(&encoded.into()); + + assert!(result.is_some()); + let err = result.unwrap(); + assert_eq!(err.name, "TimestampMustAdvance"); + assert_eq!(err.severity, ErrorSeverity::Fatal); + assert!(err.details.contains("1000")); + assert!(err.details.contains("2000")); + } + + #[test] + fn test_decode_reconfiguration_not_in_progress() { + let error = ReconfigurationNotInProgress {}; + let encoded = error.abi_encode(); + let result = decode_revert_error(&encoded.into()); + + assert!(result.is_some()); + let err = result.unwrap(); + assert_eq!(err.name, "ReconfigurationNotInProgress"); + assert_eq!(err.severity, ErrorSeverity::Recoverable); + } + + #[test] + fn test_decode_nonce_not_increasing() { + let error = NonceNotIncreasing { + sourceType: 1, + sourceId: alloy_primitives::U256::from(42), + currentNonce: 10, + providedNonce: 5, + }; + let encoded = error.abi_encode(); + let result = decode_revert_error(&encoded.into()); + + assert!(result.is_some()); + let err = result.unwrap(); + assert_eq!(err.name, "NonceNotIncreasing"); + assert_eq!(err.severity, ErrorSeverity::Recoverable); + } + + #[test] + fn test_decode_unknown_error() { + // Random bytes that don't match any known error + let unknown = Bytes::from(vec![0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD]); + let result = decode_revert_error(&unknown); + assert!(result.is_none()); + } +} diff --git a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/jwk_oracle.rs b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/jwk_oracle.rs index f339ee67f..7393337a7 100644 --- a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/jwk_oracle.rs +++ 
b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/jwk_oracle.rs @@ -17,7 +17,7 @@ use alloy_sol_macro::sol; use alloy_sol_types::SolCall; use gravity_api_types::on_chain_config::jwks::{JWKStruct, ProviderJWKs}; use reth_ethereum_primitives::TransactionSigned; -use tracing::{debug, info}; +use tracing::{debug, info, warn}; /// Default callback gas limit for oracle updates const CALLBACK_GAS_LIMIT: u64 = 500_000; @@ -59,6 +59,7 @@ fn is_rsa_jwk(jwk: &JWKStruct) -> bool { /// Checks for sourceType string (0, 1, 2, etc.) instead of fixed type_name fn is_unsupported_jwk(jwk: &JWKStruct) -> bool { // Check if type_name is a numeric string (sourceType) + // TODO(gravity): check if it should be "0x1::jwks::UNSUPPORTED_JWK" jwk.type_name.parse::().is_ok() } @@ -75,46 +76,66 @@ fn parse_rsa_jwk_from_bcs(data: &[u8]) -> Option { } /// Parse chain_id from issuer URI -/// Format: gravity://{chain_id}/events?... +/// Format: gravity://{source_type}/{chain_id}/{task_type}?... fn parse_chain_id_from_issuer(issuer: &[u8]) -> Option { let issuer_str = String::from_utf8_lossy(issuer); if issuer_str.starts_with("gravity://") { let after_protocol = &issuer_str[10..]; - if let Some(slash_pos) = after_protocol.find('/') { - let chain_id_str = &after_protocol[..slash_pos]; - return chain_id_str.parse().ok(); - } + // Skip source_type (first segment), get chain_id (second segment) + // Format: {source_type}/{chain_id}/{task_type}?... 
+ let mut parts = after_protocol.split('/'); + let _source_type = parts.next()?; // Skip source_type + let chain_id_str = parts.next()?; + return chain_id_str.parse().ok(); } None } -/// Extract nonce from ABI-encoded event payload -/// Payload format: abi.encode(address, bytes32[], bytes, uint64 block_number, uint64 log_index) -/// Returns block_number * 1000 + log_index as unique nonce -fn extract_nonce_from_payload(payload: &[u8]) -> Option { - // ABI-encoded payload structure: - // - address (32 bytes padded) - // - offset to topics array (32 bytes) - // - offset to data bytes (32 bytes) - // - block_number (32 bytes as uint64) - // - log_index (32 bytes as uint64) - // Then dynamic data... - - if payload.len() < 160 { - // Minimum: 5 * 32 bytes for fixed parts +/// Extract nonce and inner payload from ABI-encoded event data +/// Payload format: alloy's abi_encode(&(u128, &[u8])) +/// +/// alloy encodes (u128, &[u8]) as a dynamic tuple with structure: +/// - bytes 0-31: offset to tuple data (always 32 = 0x20) +/// - bytes 32-63: nonce (uint128, right-aligned, so nonce is at bytes 48-63) +/// - bytes 64-95: offset to bytes data (relative to tuple start at byte 32) +/// - bytes 96-127: payload length +/// - bytes 128+: payload data +/// +/// Returns (nonce, inner_payload) +fn extract_nonce_and_payload(data: &[u8]) -> Option<(u128, Vec)> { + // Minimum: 32 (tuple offset) + 32 (nonce) + 32 (bytes offset) + 32 (length) = 128 bytes + if data.len() < 128 { + warn!( + target: "gravity::onchain_config::jwk_oracle", + data_len = data.len(), + "Data too short for ABI decoding" + ); return None; } - // block_number is at offset 96 (3 * 32) - let block_number_bytes = &payload[96..128]; - let block_number = U256::from_be_slice(&block_number_bytes[..32]); + // nonce is at bytes 32-63, right-aligned u128 so actual value is at bytes 48-63 + let nonce_bytes = &data[48..64]; + let nonce = u128::from_be_bytes(nonce_bytes.try_into().ok()?); + + // Payload length is at bytes 96-127 
(right-aligned u256) + let length_bytes = &data[96..128]; + let payload_len = u64::from_be_bytes(length_bytes[24..32].try_into().ok()?) as usize; + + // Check we have enough data for the payload + if data.len() < 128 + payload_len { + warn!( + target: "gravity::onchain_config::jwk_oracle", + data_len = data.len(), + payload_len = payload_len, + "Not enough data for payload" + ); + return None; + } - // log_index is at offset 128 (4 * 32) - let log_index_bytes = &payload[128..160]; - let log_index = U256::from_be_slice(&log_index_bytes[..32]); + // Extract the inner payload starting at byte 128 + let inner_payload = data[128..128 + payload_len].to_vec(); - let nonce = block_number.saturating_to::() * 1000 + log_index.saturating_to::(); - Some(nonce) + Some((nonce, inner_payload)) } // ============================================================================= @@ -150,6 +171,7 @@ pub fn construct_oracle_record_transaction( // Blockchain/oracle events - use recordBatch for ALL logs construct_blockchain_batch_transaction(provider_jwks, nonce, gas_price) } else { + warn!(target: "gravity::onchain_config::jwk_oracle", "Unknown JWK type '{}' for issuer: {}", first_jwk.type_name, issuer_str); Err(format!("Unknown JWK type '{}' for issuer: {}", first_jwk.type_name, issuer_str)) } } @@ -209,6 +231,7 @@ fn construct_blockchain_batch_transaction( // Parse chain_id from issuer let chain_id = parse_chain_id_from_issuer(issuer) .ok_or_else(|| format!("Failed to parse chain_id from issuer: {:?}", issuer))?; + info!(target: "gravity::onchain_config::jwk_oracle", "jwk chain_id: {}, len {:?}", chain_id, jwks.len()); // All JWKs are guaranteed to be unsupported type when entering this function if jwks.is_empty() { @@ -216,10 +239,7 @@ fn construct_blockchain_batch_transaction( } // Parse sourceType from first JWK's type_name (all have same type) - let source_type: u32 = jwks[0] - .type_name - .parse() - .map_err(|_| format!("Invalid sourceType: {}", jwks[0].type_name))?; + let 
source_type: u32 = 0; // Build batch arrays let mut nonces: Vec = Vec::with_capacity(jwks.len()); @@ -227,21 +247,32 @@ fn construct_blockchain_batch_transaction( let mut gas_limits: Vec = Vec::with_capacity(jwks.len()); for (idx, jwk) in jwks.iter().enumerate() { - // Extract nonce from the ABI-encoded payload - let event_nonce = extract_nonce_from_payload(&jwk.data) - .ok_or_else(|| format!("Failed to extract nonce from payload at index {}", idx))?; + // Extract nonce and inner payload from ABI-encoded (nonce, payload) + let (event_nonce, inner_payload) = match extract_nonce_and_payload(&jwk.data) { + Some((nonce, payload)) => (nonce, payload), + None => { + warn!( + target: "gravity::onchain_config::jwk_oracle", + idx = idx, + payload_len = jwk.data.len(), + payload_hex = %hex::encode(&jwk.data), + "Failed to extract nonce and payload" + ); + return Err(format!("Failed to extract nonce and payload at index {}", idx)); + } + }; nonces.push(event_nonce); - // Pass payload through UNCHANGED - this is critical for comparison matching - // The relayer already ABI-encoded the data, we just store it as-is - payloads.push(jwk.data.clone().into()); + // Use the inner payload (the original MessageSent.payload) + // This is what the user put in and what gets passed to the callback + payloads.push(inner_payload.into()); gas_limits.push(U256::from(CALLBACK_GAS_LIMIT)); debug!( idx = idx, event_nonce = event_nonce, - payload_len = jwk.data.len(), - "Added event to batch (pass-through)" + inner_payload_len = payloads.last().map(|p: &Bytes| p.len()).unwrap_or(0), + "Added event to batch" ); } diff --git a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/metadata_txn.rs b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/metadata_txn.rs index eac7926fe..bc612e403 100644 --- a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/metadata_txn.rs +++ b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/metadata_txn.rs @@ -31,16 +31,29 @@ use 
std::fmt::Debug; pub const NIL_PROPOSER_INDEX: u64 = u64::MAX; /// Result of a metadata transaction execution +/// Merge new state changes into accumulated state changes +/// +/// This is a helper function to accumulate state changes from multiple +/// sequential transaction executions. +pub fn merge_state_changes(accumulated: &mut EvmState, new_changes: EvmState) { + for (addr, account) in new_changes { + accumulated.insert(addr, account); + } +} + +/// Result of a system transaction execution (metadata, DKG, or JWK) +/// This is a unified structure for all system-level transactions that are executed before +/// the parallel executor. #[derive(Debug)] -pub struct MetadataTxnResult { - /// Result of the metadata transaction execution +pub struct SystemTxnResult { + /// Result of the system transaction execution pub result: ExecutionResult, - /// The metadata transaction + /// The system transaction pub txn: TransactionSigned, } -impl MetadataTxnResult { - /// Check if the transaction emitted a `NewEpochEvent` event +impl SystemTxnResult { + /// Check if the transaction emitted a `NewEpoch` event pub fn emit_new_epoch(&self) -> Option<(u64, Bytes)> { for log in self.result.logs() { match NewEpochEvent::decode_log(log) { @@ -55,7 +68,8 @@ impl MetadataTxnResult { None } - /// Convert the metadata transaction result into a full executed block result + /// Convert the system transaction result into a full executed block result + /// Used when new epoch is triggered and the block needs to be discarded. 
pub(crate) fn into_executed_ordered_block_result( self, chain_spec: &ChainSpec, @@ -122,31 +136,75 @@ impl MetadataTxnResult { } } - /// Insert this metadata transaction into an existing executed block result + /// Insert this system transaction into an existing executed block result at the specified + /// position Position 0 is reserved for metadata tx, positions 1+ are for validator + /// transactions pub(crate) fn insert_to_executed_ordered_block_result( self, - result: &mut ExecuteOrderedBlockResult, + result: &mut crate::ExecuteOrderedBlockResult, + insert_position: usize, ) { let gas_used = self.result.gas_used(); result.block.header.gas_used += gas_used; result.execution_output.gas_used += gas_used; - result.execution_output.receipts.iter_mut().for_each(|receipt| { + + // Calculate cumulative_gas_used for this system transaction: + // It should be the cumulative gas of the previous transaction (at insert_position - 1) + // plus this transaction's gas_used + let cumulative_gas_used = if insert_position == 0 { + // First transaction, cumulative equals its own gas_used + gas_used + } else { + // Get cumulative from the previous receipt and add this tx's gas + result + .execution_output + .receipts + .get(insert_position - 1) + .map(|prev| prev.cumulative_gas_used + gas_used) + .unwrap_or(gas_used) + }; + + // Update all receipts AFTER insert_position to add this tx's gas + for receipt in result.execution_output.receipts.iter_mut().skip(insert_position) { receipt.cumulative_gas_used += gas_used; - }); + } + result.execution_output.receipts.insert( - 0, + insert_position, Receipt { tx_type: self.txn.tx_type(), success: true, - cumulative_gas_used: gas_used, + cumulative_gas_used, logs: self.result.into_logs(), }, ); - result.block.body.transactions.insert(0, self.txn); - result.senders.insert(0, SYSTEM_CALLER); + result.block.body.transactions.insert(insert_position, self.txn); + result.senders.insert(insert_position, SYSTEM_CALLER); } } +/// Execute a single 
system transaction (metadata, DKG, or JWK) +/// +/// This is the unified entry point for executing all system-level transactions. +/// These transactions are executed one by one before the parallel executor. +pub fn transact_system_txn( + evm: &mut impl Evm, + txn: TransactionSigned, +) -> (SystemTxnResult, EvmState) { + use reth_evm::IntoTxEnv; + use reth_primitives::Recovered; + + let tx_env = Recovered::new_unchecked(txn.clone(), SYSTEM_CALLER).into_tx_env(); + let result = evm.transact_raw(tx_env).unwrap(); + + // Log any execution errors with appropriate severity + if !result.result.is_success() { + super::errors::log_execution_error(&result.result); + } + + (SystemTxnResult { result: result.result, txn }, result.state) +} + /// Create a new system call transaction fn new_system_call_txn( contract: Address, @@ -178,11 +236,12 @@ fn new_system_call_txn( /// /// @param proposer_index Index of the proposer in the active validator set, /// or None for NIL blocks (will use NIL_PROPOSER_INDEX = u64::MAX) -pub fn transact_metadata_contract_call( - evm: &mut impl Evm, +pub fn construct_metadata_txn( + nonce: u64, + gas_price: u128, timestamp_us: u64, proposer_index: Option, -) -> (MetadataTxnResult, EvmState) { +) -> TransactionSigned { // For NIL blocks, use NIL_PROPOSER_INDEX (type(uint64).max in Solidity) let proposer_idx = proposer_index.unwrap_or(NIL_PROPOSER_INDEX); @@ -193,18 +252,5 @@ pub fn transact_metadata_contract_call( }; let input: Bytes = call.abi_encode().into(); - let system_call_account = - evm.db_mut().basic(SYSTEM_CALLER).unwrap().expect("SYSTEM_CALLER not exists"); - let txn = new_system_call_txn( - BLOCK_ADDR, - system_call_account.nonce, - evm.block().basefee as u128, - input, - ); - let tx_env = Recovered::new_unchecked(txn.clone(), SYSTEM_CALLER).into_tx_env(); - let result = evm.transact_raw(tx_env).unwrap(); - - assert!(result.result.is_success(), "Failed to execute onBlockStart: {:?}", result.result); - - (MetadataTxnResult { result: 
result.result, txn }, result.state) + new_system_call_txn(BLOCK_ADDR, nonce, gas_price, input) } diff --git a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/mod.rs b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/mod.rs index d51f380ad..f97c92d79 100644 --- a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/mod.rs +++ b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/mod.rs @@ -6,6 +6,7 @@ pub mod base; pub mod consensus_config; pub mod dkg; pub mod epoch; +pub mod errors; pub mod jwk_consensus_config; pub mod jwk_oracle; pub mod metadata_txn; @@ -18,7 +19,7 @@ pub mod validator_set; pub use base::{ConfigFetcher, OnchainConfigFetcher}; pub use consensus_config::ConsensusConfigFetcher; pub use epoch::EpochFetcher; -pub use metadata_txn::{transact_metadata_contract_call, MetadataTxnResult}; +pub use metadata_txn::{construct_metadata_txn, transact_system_txn, SystemTxnResult}; pub use types::{ convert_active_validators_to_bcs, convert_validator_consensus_info, ValidatorConsensusInfo, ValidatorStatus, @@ -99,7 +100,7 @@ use alloy_consensus::{EthereumTxEnvelope, TxEip4844, TxLegacy}; use alloy_primitives::{Bytes, Signature, U256}; use reth_ethereum_primitives::{Transaction, TransactionSigned}; use revm_primitives::TxKind; -use tracing::{debug, info}; +use tracing::{debug, info, warn}; /// Construct validator transactions envelope (JWK updates and DKG transcripts) /// @@ -118,7 +119,7 @@ pub fn construct_validator_txns_envelope( let current_nonce = system_caller_nonce + index as u64; // Process data based on ExtraDataType variant - match process_extra_data(data, current_nonce, gas_price) { + match construct_validator_txn_from_extra_data(data, current_nonce, gas_price) { Ok(transaction) => txns.push(transaction), Err(e) => { return Err(format!("Failed to process extra data at index {}: {}", index, e)); @@ -129,12 +130,12 @@ pub fn construct_validator_txns_envelope( Ok(txns) } -/// Process extra data based on its ExtraDataType variant 
+/// Construct a single validator transaction from ExtraDataType /// /// Supports: /// - JWK/Oracle updates (ExtraDataType::JWK) - includes both RSA JWKs and blockchain events /// - DKG transcripts (ExtraDataType::DKG) -fn process_extra_data( +pub fn construct_validator_txn_from_extra_data( data: &gravity_api_types::ExtraDataType, nonce: u64, gas_price: u128, diff --git a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/types.rs b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/types.rs index 557ad3a1a..60a221387 100644 --- a/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/types.rs +++ b/crates/pipe-exec-layer-ext-v2/execute/src/onchain_config/types.rs @@ -188,6 +188,21 @@ sol! { event ObservedJWKsUpdated(uint256 indexed epoch, OracleProviderJWKs[] jwks); } +sol! { + /// DataRecorded event from NativeOracle contract + /// Emitted when data is recorded by the consensus engine via SYSTEM_CALLER + /// @param sourceType The source type (0 = BLOCKCHAIN, 1 = JWK, etc.) + /// @param sourceId The source identifier (e.g., chain ID for blockchains) + /// @param nonce The nonce (block height, timestamp, etc.) 
+ /// @param dataLength Length of the stored data + event DataRecorded( + uint32 indexed sourceType, + uint256 indexed sourceId, + uint128 nonce, + uint256 dataLength + ); +} + /// RSA JWK fields for BCS serialization - matches gravity-aptos struct order #[derive(serde::Serialize, serde::Deserialize)] pub struct GaptosRsaJwk { diff --git a/crates/pipe-exec-layer-ext-v2/relayer/src/blockchain_source.rs b/crates/pipe-exec-layer-ext-v2/relayer/src/blockchain_source.rs index 9c99c6cdd..348a6d0e1 100644 --- a/crates/pipe-exec-layer-ext-v2/relayer/src/blockchain_source.rs +++ b/crates/pipe-exec-layer-ext-v2/relayer/src/blockchain_source.rs @@ -6,7 +6,7 @@ use crate::{ data_source::{source_types, OracleData, OracleDataSource}, eth_client::EthHttpCli, }; -use alloy_primitives::{Address, Bytes, U256}; +use alloy_primitives::{hex, Address, Bytes, U256}; use alloy_rpc_types::Filter; use alloy_sol_macro::sol; use alloy_sol_types::SolEvent; @@ -25,6 +25,40 @@ sol! { event MessageSent(uint128 indexed nonce, bytes payload); } +/// Decode ABI-encoded `bytes` from Solidity event log data +/// +/// Solidity encodes `bytes` in events as: +/// - bytes 0-31: offset to data (typically 0x20 = 32) +/// - bytes 32-63: length of data +/// - bytes 64+: actual data +/// +/// Returns the raw bytes without the ABI encoding wrapper. +fn decode_abi_bytes(data: &[u8]) -> Option> { + // Minimum: offset (32) + length (32) = 64 bytes + if data.len() < 64 { + return None; + } + + // Read offset (last 8 bytes of first 32-byte word, as it's right-aligned) + let offset = u64::from_be_bytes(data[24..32].try_into().ok()?) as usize; + + // Offset should point to the length word + if offset + 32 > data.len() { + return None; + } + + // Read length (last 8 bytes of the length word) + let length = u64::from_be_bytes(data[offset + 24..offset + 32].try_into().ok()?) 
as usize; + + // Data starts after the length word + let data_start = offset + 32; + if data_start + length > data.len() { + return None; + } + + Some(data[data_start..data_start + length].to_vec()) +} + /// Blockchain event source for monitoring GravityPortal.MessageSent events /// /// This is the primary data source for cross-chain message bridging. @@ -312,17 +346,43 @@ impl OracleDataSource for BlockchainEventSource { continue; } - let payload = log.data().data.clone(); + let log_data = log.data().data.clone(); + + // log.data is ABI-encoded `bytes payload` from Solidity event + // Format: offset (32 bytes) || length (32 bytes) || data (variable) + // We need to extract the raw PortalMessage bytes before re-encoding + let raw_payload = match decode_abi_bytes(&log_data) { + Some(payload) => payload, + None => { + warn!( + target: "blockchain_source", + chain_id = self.chain_id, + nonce = nonce, + log_data_len = log_data.len(), + "Failed to decode ABI bytes from log.data, skipping" + ); + continue; + } + }; + + // ABI encode (nonce, raw_payload) together + // This preserves the nonce when passing through JWKStruct + // Format: abi.encode(uint128 nonce, bytes payload) + // Now raw_payload is the actual PortalMessage (sender || messageNonce || message) + let encoded_payload = + alloy_sol_types::SolValue::abi_encode(&(nonce, raw_payload.as_slice())); debug!( target: "blockchain_source", chain_id = self.chain_id, nonce = nonce, - payload_len = payload.len(), - "Found new MessageSent event" + log_data_len = log_data.len(), + raw_payload_len = raw_payload.len(), + encoded_payload_len = encoded_payload.len(), + "Found new MessageSent event - decoded and re-encoded" ); - results.push(OracleData { nonce, payload: Bytes::from(payload.to_vec()) }); + results.push(OracleData { nonce, payload: Bytes::from(encoded_payload) }); } // Update cursor @@ -382,44 +442,44 @@ mod tests { /// Local Anvil chain ID const ANVIL_CHAIN_ID: u64 = 31337; - /// PortalMessage format decoder - 
/// Payload format: sender (20 bytes) || nonce (16 bytes) || message (variable) + /// PortalMessage format decoder for relayer output + /// + /// Relayer output format: abi.encode((uint128 nonce, bytes raw_portal_message)) + /// Where raw_portal_message is: sender (20 bytes) || messageNonce (16 bytes) || message + /// + /// ABI encoding structure: + /// - bytes 0-31: tuple offset (0x20) + /// - bytes 32-63: nonce (uint128, right-aligned, actual value at bytes 48-63) + /// - bytes 64-95: offset to bytes data (relative to tuple start) + /// - bytes 96-127: length of bytes data + /// - bytes 128+: raw PortalMessage data fn decode_portal_message(payload: &[u8]) -> Option<(Address, u128, Vec)> { - if payload.len() < 36 { - return None; - } - - // The payload is ABI-encoded as `bytes`, so first decode the outer wrapper - // ABI encoding: offset (32 bytes) || length (32 bytes) || data - if payload.len() < 64 { + // Minimum: tuple offset (32) + nonce (32) + bytes offset (32) + length (32) + min data (36) = 164 + if payload.len() < 164 { return None; } - // Read offset and length - let offset = u64::from_be_bytes(payload[24..32].try_into().ok()?) as usize; - if offset + 32 > payload.len() { - return None; - } + // Extract the raw PortalMessage from ABI-encoded (nonce, bytes) tuple + // Length is at bytes 96-127 (right-aligned) + let length = u64::from_be_bytes(payload[120..128].try_into().ok()?) as usize; - let length = - u64::from_be_bytes(payload[offset + 24..offset + 32].try_into().ok()?) 
as usize; - let data_start = offset + 32; - if data_start + length > payload.len() { + // PortalMessage data starts at byte 128 + if payload.len() < 128 + length { return None; } - let inner_data = &payload[data_start..data_start + length]; + let portal_message = &payload[128..128 + length]; - // Now parse PortalMessage: sender (20) || nonce (16) || message - if inner_data.len() < 36 { + // Now parse PortalMessage: sender (20) || messageNonce (16) || message + if portal_message.len() < 36 { return None; } - let sender = Address::from_slice(&inner_data[0..20]); - let nonce = u128::from_be_bytes(inner_data[20..36].try_into().ok()?); - let message = inner_data[36..].to_vec(); + let sender = Address::from_slice(&portal_message[0..20]); + let message_nonce = u128::from_be_bytes(portal_message[20..36].try_into().ok()?); + let message = portal_message[36..].to_vec(); - Some((sender, nonce, message)) + Some((sender, message_nonce, message)) } /// Decode bridge message: abi.encode(amount, recipient)