diff --git a/.db-versions.yml b/.db-versions.yml index 9a9dea695..73983d32d 100644 --- a/.db-versions.yml +++ b/.db-versions.yml @@ -1,5 +1,7 @@ -current_version: 1 +current_version: 2 versions: + - version: 2 + pr: 388 - version: 1 pr: 450 - version: 0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cdf30ddc..f99e28d02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Next release +- feat(rpc): added `getMessagesStatus` method - fix(primitives): limit legacy class sizes - fix(block_production): dynamic block closing now adds special address with prev block hash - fix(rpc): call, simulate, estimate rpcs executed on top of the block, not at the start of it diff --git a/Cargo.lock b/Cargo.lock index 50ce03c38..1e023e8f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5552,6 +5552,7 @@ dependencies = [ name = "mc-db" version = "0.7.0" dependencies = [ + "alloy", "anyhow", "bincode 1.3.3", "blockifier", @@ -5657,6 +5658,7 @@ dependencies = [ "anyhow", "bigdecimal", "bitvec", + "blockifier", "dotenv", "futures", "httpmock", @@ -5832,6 +5834,7 @@ dependencies = [ name = "mc-rpc" version = "0.7.0" dependencies = [ + "alloy", "anyhow", "bitvec", "blockifier", diff --git a/crates/madara/client/db/Cargo.toml b/crates/madara/client/db/Cargo.toml index 0f9c30e2d..c197482c4 100644 --- a/crates/madara/client/db/Cargo.toml +++ b/crates/madara/client/db/Cargo.toml @@ -36,6 +36,7 @@ starknet-types-rpc = { workspace = true } starknet_api = { workspace = true } # Other +alloy = { workspace = true } anyhow.workspace = true bincode = { workspace = true } librocksdb-sys = { workspace = true } diff --git a/crates/madara/client/db/src/l1_db.rs b/crates/madara/client/db/src/l1_db.rs index eb7bf4fd6..5f78bc621 100644 --- a/crates/madara/client/db/src/l1_db.rs +++ b/crates/madara/client/db/src/l1_db.rs @@ -1,9 +1,10 @@ +use crate::error::DbError; +use crate::{Column, DatabaseExt, MadaraBackend, MadaraStorageError}; +use alloy::primitives::TxHash; use rocksdb::{IteratorMode, 
WriteOptions}; use serde::{Deserialize, Serialize}; use starknet_api::core::Nonce; - -use crate::error::DbError; -use crate::{Column, DatabaseExt, MadaraBackend, MadaraStorageError}; +use starknet_types_core::felt::Felt; type Result = std::result::Result; @@ -129,6 +130,34 @@ impl MadaraBackend { Ok(()) } + pub fn get_l1_handler_tx_hashes(&self, l1_tx_hash: TxHash) -> Result, DbError> { + let l1_l2_mappings_column = self.db.get_column(Column::L1MessagingHandlerTxHashes); + let l1_handler_tx_hashes = self + .db + .prefix_iterator_cf(&l1_l2_mappings_column, l1_tx_hash.as_slice()) + .map(|kv_bytes| Ok(Felt::from_bytes_be_slice(&kv_bytes?.1))) + .collect::>()?; + Ok(l1_handler_tx_hashes) + } + + /// Store mapping from L1 transaction to L1 handler transaction (on the L2). A unique order + /// value is required to ensure the handler transactions are retrieved in the correct order. + pub fn add_l1_handler_tx_hash_mapping( + &self, + l1_tx_hash: TxHash, + l1_handler_tx_hash: Felt, + order: u64, + ) -> Result<(), DbError> { + let l1_l2_mappings_column = self.db.get_column(Column::L1MessagingHandlerTxHashes); + let mut key = [0u8; 40]; + key[..32].copy_from_slice(l1_tx_hash.as_slice()); + key[32..].copy_from_slice(&order.to_be_bytes()); // BE is important for the lexicographic sorting + let mut writeopts = WriteOptions::default(); + writeopts.disable_wal(true); + self.db.put_cf_opt(&l1_l2_mappings_column, key, l1_handler_tx_hash.to_bytes_be(), &writeopts)?; + Ok(()) + } + /// Retrieve the latest L1 messaging [Nonce] if one is available, otherwise /// returns [None]. 
pub fn get_l1_messaging_nonce_latest(&self) -> Result, MadaraStorageError> { diff --git a/crates/madara/client/db/src/lib.rs b/crates/madara/client/db/src/lib.rs index 33bc43c4e..eef56f71a 100644 --- a/crates/madara/client/db/src/lib.rs +++ b/crates/madara/client/db/src/lib.rs @@ -149,6 +149,7 @@ pub enum Column { L1Messaging, L1MessagingNonce, + L1MessagingHandlerTxHashes, /// Devnet: stores the private keys for the devnet predeployed contracts Devnet, @@ -196,6 +197,7 @@ impl Column { BonsaiClassesLog, L1Messaging, L1MessagingNonce, + L1MessagingHandlerTxHashes, PendingContractToClassHashes, PendingContractToNonces, PendingContractStorage, @@ -232,6 +234,7 @@ impl Column { ContractStorage => "contract_storage", L1Messaging => "l1_messaging", L1MessagingNonce => "l1_messaging_nonce", + L1MessagingHandlerTxHashes => "l1_messaging_handler_tx_hashes", PendingContractToClassHashes => "pending_contract_to_class_hashes", PendingContractToNonces => "pending_contract_to_nonces", PendingContractStorage => "pending_contract_storage", diff --git a/crates/madara/client/db/src/rocksdb_options.rs b/crates/madara/client/db/src/rocksdb_options.rs index 37aebb516..2fdd7248d 100644 --- a/crates/madara/client/db/src/rocksdb_options.rs +++ b/crates/madara/client/db/src/rocksdb_options.rs @@ -2,6 +2,8 @@ #![allow(non_upper_case_globals)] // allow KiB/MiB/GiB names use crate::{contract_db, Column}; +use alloy::primitives::private::alloy_rlp::MaxEncodedLenAssoc; +use alloy::primitives::TxHash; use anyhow::{Context, Result}; use rocksdb::{DBCompressionType, Env, Options, SliceTransform}; @@ -56,6 +58,9 @@ impl Column { contract_db::CONTRACT_NONCES_PREFIX_EXTRACTOR, )); } + Column::L1MessagingHandlerTxHashes => { + options.set_prefix_extractor(SliceTransform::create_fixed_prefix(TxHash::LEN)); + } _ => {} } diff --git a/crates/madara/client/eth/Cargo.toml b/crates/madara/client/eth/Cargo.toml index c944fdbb2..fa4766c88 100644 --- a/crates/madara/client/eth/Cargo.toml +++ 
b/crates/madara/client/eth/Cargo.toml @@ -28,6 +28,7 @@ mp-transactions.workspace = true mp-utils.workspace = true # Starknet +blockifier.workspace = true starknet-types-core.workspace = true starknet_api.workspace = true diff --git a/crates/madara/client/eth/src/l1_messaging.rs b/crates/madara/client/eth/src/l1_messaging.rs index 52b4af78e..00018a5f7 100644 --- a/crates/madara/client/eth/src/l1_messaging.rs +++ b/crates/madara/client/eth/src/l1_messaging.rs @@ -1,8 +1,9 @@ +use crate::client::EthereumClient; use crate::client::StarknetCoreContract::LogMessageToL2; -use crate::client::{EthereumClient, StarknetCoreContract}; use crate::utils::u256_to_felt; use alloy::eips::BlockNumberOrTag; use alloy::primitives::{keccak256, FixedBytes, U256}; +use alloy::rpc::types::Log; use alloy::sol_types::SolValue; use anyhow::Context; use futures::StreamExt; @@ -10,7 +11,8 @@ use mc_db::{l1_db::LastSyncedEventBlock, MadaraBackend}; use mc_mempool::{Mempool, MempoolProvider}; use mp_utils::service::ServiceContext; use starknet_api::core::{ChainId, ContractAddress, EntryPointSelector, Nonce}; -use starknet_api::transaction::{Calldata, L1HandlerTransaction, TransactionVersion}; +use starknet_api::transaction::{Calldata, L1HandlerTransaction, Transaction, TransactionVersion}; +use starknet_api::transaction_hash::get_transaction_hash; use starknet_types_core::felt::Felt; use std::sync::Arc; @@ -54,9 +56,10 @@ pub async fn sync( return Err(e.into()); } }; - let event_filter = client.l1_core_contract.event_filter::(); - let mut event_stream = event_filter + let mut event_stream = client + .l1_core_contract + .event_filter::() .from_block(last_synced_event_block.block_number) .to_block(BlockNumberOrTag::Finalized) .watch() @@ -67,105 +70,76 @@ pub async fn sync( .into_stream(); while let Some(Some(event_result)) = ctx.run_until_cancelled(event_stream.next()).await { - if let Ok((event, meta)) = event_result { - tracing::info!( - "⟠ Processing L1 Message from block: {:?}, 
transaction_hash: {:?}, log_index: {:?}, fromAddress: {:?}", - meta.block_number, - meta.transaction_hash, - meta.log_index, - event.fromAddress - ); - - // Check if cancellation was initiated - let event_hash = get_l1_to_l2_msg_hash(&event)?; - tracing::info!("⟠ Checking for cancelation, event hash : {:?}", event_hash); - let cancellation_timestamp = client.get_l1_to_l2_message_cancellations(event_hash).await?; - if cancellation_timestamp != Felt::ZERO { - tracing::info!("⟠ L1 Message was cancelled in block at timestamp : {:?}", cancellation_timestamp); - let tx_nonce = Nonce(u256_to_felt(event.nonce)?); - // cancelled message nonce should be inserted to avoid reprocessing - match backend.has_l1_messaging_nonce(tx_nonce) { - Ok(false) => { - backend.set_l1_messaging_nonce(tx_nonce)?; - } - Ok(true) => {} - Err(e) => { - tracing::error!("⟠ Unexpected DB error: {:?}", e); - return Err(e.into()); - } + match event_result { + Ok((event, log)) => { + if let Err(e) = process_l1_to_l2_msg(&backend, &client, &chain_id, &mempool, event, log).await { + tracing::error!("⟠ Unable to process L1 -> L2 messsage event: {e:?}"); }; - continue; - } - - match process_l1_message(&backend, &event, &meta.block_number, &meta.log_index, &chain_id, mempool.clone()) - .await - { - Ok(Some(tx_hash)) => { - tracing::info!( - "⟠ L1 Message from block: {:?}, transaction_hash: {:?}, log_index: {:?} submitted, \ - transaction hash on L2: {:?}", - meta.block_number, - meta.transaction_hash, - meta.log_index, - tx_hash - ); - } - Ok(None) => {} - Err(e) => { - tracing::error!( - "⟠ Unexpected error while processing L1 Message from block: {:?}, transaction_hash: {:?}, \ - log_index: {:?}, error: {:?}", - meta.block_number, - meta.transaction_hash, - meta.log_index, - e - ) - } } + Err(e) => tracing::error!("⟠ Unable to receive L1 -> L2 message event: {e}"), } } Ok(()) } -async fn process_l1_message( +async fn process_l1_to_l2_msg( backend: &MadaraBackend, - event: &LogMessageToL2, - 
l1_block_number: &Option, - event_index: &Option, - _chain_id: &ChainId, - mempool: Arc, -) -> anyhow::Result> { - let transaction = parse_handle_l1_message_transaction(event)?; - let tx_nonce = transaction.nonce; - let fees: u128 = event.fee.try_into()?; + client: &EthereumClient, + chain_id: &ChainId, + mempool: &Arc, + event: LogMessageToL2, + log: Log, +) -> anyhow::Result<()> { + tracing::debug!("⟠ Processing L1 -> L2 message event {event:#?}, contract address: {}, block number: {:?}, transaction index: {:?}, transaction hash: {:?}, log index: {:?}", + log.address(), log.block_number, log.transaction_index, log.transaction_hash, log.log_index + ); + + let tx_nonce = Nonce(u256_to_felt(event.nonce)?); // Ensure that L1 message has not been executed - match backend.has_l1_messaging_nonce(tx_nonce) { - Ok(false) => { - backend.set_l1_messaging_nonce(tx_nonce)?; - } - Ok(true) => { - tracing::debug!("⟠ Event already processed: {:?}", transaction); - return Ok(None); - } - Err(e) => { - tracing::error!("⟠ Unexpected DB error: {:?}", e); - return Err(e.into()); - } - }; + if backend.has_l1_messaging_nonce(tx_nonce)? 
{ + tracing::debug!("⟠ L1 -> L2 event already processed: {tx_nonce:?}"); + return Ok(()); + } else { + backend.set_l1_messaging_nonce(tx_nonce)?; + } + + // Check if cancellation was initiated + let event_hash = get_l1_to_l2_msg_hash(&event); + let cancellation_timestamp = client.get_l1_to_l2_message_cancellations(event_hash).await?; + if cancellation_timestamp != Felt::ZERO { + tracing::info!("⟠ L1 message was cancelled at timestamp {:?}", cancellation_timestamp.to_biguint()); + return Ok(()); + } + + let l1_handler_transaction = parse_handle_l1_message_transaction(&event)?; + let fees: u128 = event.fee.try_into()?; - let res = mempool.tx_accept_l1_handler(transaction.into(), fees)?; + let tx_hash = get_transaction_hash( + &Transaction::L1Handler(l1_handler_transaction.clone()), + chain_id, + &l1_handler_transaction.version, + )?; - // TODO: remove unwraps - // Ques: shall it panic if no block number of event_index? - let block_sent = LastSyncedEventBlock::new(l1_block_number.unwrap(), event_index.unwrap()); - backend.messaging_update_last_synced_l1_block_with_event(block_sent)?; + mempool.tx_accept_l1_handler(l1_handler_transaction.clone().into(), fees)?; - Ok(Some(res.transaction_hash)) + let l1_tx_hash = log.transaction_hash.context("Missing transaction hash")?; + let block_number = log.block_number.context("Event missing block number")?; + let log_index = log.log_index.context("Event missing log index")?; + + // We use the log index for the order to ensure any L1 txs which have multiple messages are + retrieved in the order they occurred. 
+ backend.add_l1_handler_tx_hash_mapping(l1_tx_hash, tx_hash.0, log_index)?; + + let last_synced_event_block = LastSyncedEventBlock::new(block_number, log_index); + backend.messaging_update_last_synced_l1_block_with_event(last_synced_event_block)?; + + tracing::info!("⟠ L1 message processed: {:?}, transaction hash: {:?}", l1_handler_transaction, tx_hash); + Ok(()) } -pub fn parse_handle_l1_message_transaction(event: &LogMessageToL2) -> anyhow::Result { +fn parse_handle_l1_message_transaction(event: &LogMessageToL2) -> anyhow::Result { // L1 from address. let from_address = u256_to_felt(event.fromAddress.into_word().into())?; @@ -198,7 +172,7 @@ pub fn parse_handle_l1_message_transaction(event: &LogMessageToL2) -> anyhow::Re } /// Computes the message hashed with the given event data -fn get_l1_to_l2_msg_hash(event: &LogMessageToL2) -> anyhow::Result> { +fn get_l1_to_l2_msg_hash(event: &LogMessageToL2) -> FixedBytes<32> { let data = ( [0u8; 12], event.fromAddress.0 .0, @@ -208,14 +182,11 @@ fn get_l1_to_l2_msg_hash(event: &LogMessageToL2) -> anyhow::Result (handler_tx.tx, handler_tx.tx_hash.0), + Transaction::AccountTransaction(_) => panic!("Expecting L1 handler transaction"), + }; + assert_eq!(handler_tx.nonce, nonce); + assert_eq!( + handler_tx.contract_address, + ContractAddress::try_from( + Felt::from_dec_str("3256441166037631918262930812410838598500200462657642943867372734773841898370") + .unwrap() + ) + .unwrap() + ); + assert_eq!( + handler_tx.entry_point_selector, + EntryPointSelector( + Felt::from_dec_str("774397379524139446221206168840917193112228400237242521560346153613428128537") + .unwrap() + ) + ); + assert_eq!( + handler_tx.calldata.0[0], + Felt::from_dec_str("993696174272377493693496825928908586134624850969").unwrap() + ); - // TODO : Assert that the tx has been included in the mempool + // Assert the L1 -> L2 mapping is stored + let l1_handler_tx_hashes = db + .backend() + .get_l1_handler_tx_hashes( + 
TxHash::from_hex("4961b0fef9f7d7c46fb9095b2b97ea3dc8157fca04e4f2562d1461ac3bb03867").unwrap(), + ) + .expect("Unable to get L1 -> L2 tx hashes mapping from DB"); + assert_eq!(l1_handler_tx_hashes, vec![handler_tx_hash]); // Assert that the event is well stored in db let last_block = db.backend().messaging_last_synced_l1_block_with_event().expect("failed to retrieve block").unwrap(); assert_ne!(last_block.block_number, 0); - let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); // TODO : Assert that the tx was correctly executed @@ -500,7 +499,7 @@ mod l1_messaging_tests { .unwrap() .block_number ); - assert!(logs_contain("Event already processed")); + assert!(logs_contain("L1 -> L2 event already processed")); worker_handle.abort(); } @@ -545,7 +544,7 @@ mod l1_messaging_tests { let nonce = Nonce(Felt::from_dec_str("10000000000000000").expect("failed to parse nonce string")); // cancelled message nonce should be inserted to avoid reprocessing assert!(db.backend().has_l1_messaging_nonce(nonce).unwrap()); - assert!(logs_contain("L1 Message was cancelled in block at timestamp : 0x66b4f105")); + assert!(logs_contain("L1 message was cancelled at timestamp 1723134213")); worker_handle.abort(); } @@ -571,8 +570,7 @@ mod l1_messaging_tests { ], nonce: U256::from(775628), fee: U256::ZERO, - }) - .expect("Failed to compute l1 to l2 msg hash"); + }); let expected_hash = <[u8; 32]>::from_hex("c51a543ef9563ad2545342b390b67edfcddf9886aa36846cf70382362fc5fab3").unwrap(); diff --git a/crates/madara/client/mempool/src/lib.rs b/crates/madara/client/mempool/src/lib.rs index 2178cb5b0..8eb75b5ef 100644 --- a/crates/madara/client/mempool/src/lib.rs +++ b/crates/madara/client/mempool/src/lib.rs @@ -93,7 +93,7 @@ pub trait MempoolProvider: Send + Sync { paid_fees_on_l1: u128, ) -> Result; fn txs_take_chunk(&self, dest: &mut VecDeque, n: usize); - fn tx_take(&mut self) -> Option; + fn 
tx_take(&self) -> Option; fn tx_mark_included(&self, contract_address: &Felt); fn txs_re_add( &self, @@ -351,7 +351,6 @@ impl MempoolProvider for Mempool { let nonce = Nonce(Felt::from(tx.nonce)); let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version, paid_fees_on_l1)?; - // L1 Handler nonces represent the ordering of L1 transactions sent by // the core L1 contract. In principle this is a bit strange, as there // currently is only 1 core L1 contract, so all transactions should be @@ -362,8 +361,7 @@ impl MempoolProvider for Mempool { // INFO: L1 nonce are stored differently in the db because of this, which is // why we do not use `retrieve_nonce_readiness`. let nonce_next = nonce.try_increment()?; - let nonce_target = - self.backend.get_l1_messaging_nonce_latest()?.map(|nonce| nonce.try_increment()).unwrap_or(Ok(nonce))?; + let nonce_target = self.backend.get_l1_messaging_nonce_latest()?.unwrap_or(nonce); let nonce_info = if nonce != nonce_target { NonceInfo::pending(nonce, nonce_next) } else { @@ -430,7 +428,7 @@ impl MempoolProvider for Mempool { } #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn tx_take(&mut self) -> Option { + fn tx_take(&self) -> Option { if let Some(mempool_tx) = self.inner.write().expect("Poisoned lock").pop_next() { let contract_address = mempool_tx.contract_address().to_felt(); let nonce_next = mempool_tx.nonce_next; @@ -729,7 +727,7 @@ mod test { l1_data_provider: Arc, tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, ) { - let mut mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); let timestamp = ArrivedAtTimestamp::now(); let result = mempool.accept_tx(tx_account_v0_valid, None, timestamp, NonceInfo::default()); assert_matches::assert_matches!(result, Ok(())); @@ -1407,7 +1405,7 @@ mod test { 
#[from(tx_account_v0_valid)] tx_ready: blockifier::transaction::transaction_execution::Transaction, #[from(tx_account_v0_valid)] tx_pending: blockifier::transaction::transaction_execution::Transaction, ) { - let mut mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); // Insert pending transaction diff --git a/crates/madara/client/rpc/Cargo.toml b/crates/madara/client/rpc/Cargo.toml index 45f1e4280..9337b9536 100644 --- a/crates/madara/client/rpc/Cargo.toml +++ b/crates/madara/client/rpc/Cargo.toml @@ -45,6 +45,7 @@ starknet-types-rpc = { workspace = true } starknet_api = { workspace = true, default-features = true } # Others +alloy = { workspace = true } anyhow = { workspace = true } bitvec = { workspace = true } jsonrpsee = { workspace = true, default-features = true, features = [ diff --git a/crates/madara/client/rpc/src/RPC.md b/crates/madara/client/rpc/src/RPC.md index a7b88868a..4f867d632 100644 --- a/crates/madara/client/rpc/src/RPC.md +++ b/crates/madara/client/rpc/src/RPC.md @@ -14,7 +14,7 @@ methods exist in isolation from `read` methods for example. _different versions_ of the same RPC method. This is mostly present for ease of development of new RPC versions, but also serves to assure a level of backwards compatibility. To select a specific version of an rpc method, you will need to -append `/rcp/v{version}` to the rpc url you are connecting to. +append `/rpc/v{version}` to the rpc url you are connecting to. **RPC versions are grouped under the `Starknet` struct**. 
This serves as a common point of implementation for all RPC methods across all versions, and is diff --git a/crates/madara/client/rpc/src/versions/user/v0_8_0/api.rs b/crates/madara/client/rpc/src/versions/user/v0_8_0/api.rs index d968744f3..cd4a21074 100644 --- a/crates/madara/client/rpc/src/versions/user/v0_8_0/api.rs +++ b/crates/madara/client/rpc/src/versions/user/v0_8_0/api.rs @@ -1,8 +1,10 @@ +use alloy::primitives::TxHash; use jsonrpsee::core::RpcResult; use m_proc_macros::versioned_rpc; use mp_block::BlockId; use serde::{Deserialize, Serialize}; use starknet_types_core::felt::Felt; +use starknet_types_rpc::TxnStatus; pub(crate) type NewHead = starknet_types_rpc::BlockHeader; pub(crate) type EmittedEvent = starknet_types_rpc::EmittedEvent; @@ -53,6 +55,13 @@ pub struct GetStorageProofResult { pub global_roots: GlobalRoots, } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct MessageStatus { + pub transaction_hash: Felt, + pub finality_status: TxnStatus, + pub failure_reason: Option, +} + #[versioned_rpc("V0_8_0", "starknet")] pub trait StarknetWsRpcApi { #[subscription(name = "subscribeNewHeads", unsubscribe = "unsubscribeNewHeads", item = NewHead, param_kind = map)] @@ -83,4 +92,10 @@ pub trait StarknetReadRpcApi { contract_addresses: Option>, contracts_storage_keys: Option>, ) -> RpcResult; + + /// For the given L1 transaction hash, return the associated L1 handler transaction hashes and + statuses for all L1 -> L2 messages sent by the L1 transaction, ordered by the L1 + transaction sending order. 
+ #[method(name = "getMessagesStatus")] + fn get_messages_status(&self, transaction_hash: TxHash) -> RpcResult>; } diff --git a/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/read/get_messages_status.rs b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/read/get_messages_status.rs new file mode 100644 index 000000000..c4b147705 --- /dev/null +++ b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/read/get_messages_status.rs @@ -0,0 +1,42 @@ +use crate::utils::ResultExt; +use crate::versions::user::{ + v0_7_1::methods::read::get_transaction_status::get_transaction_status, v0_8_0::MessageStatus, +}; +use crate::{Starknet, StarknetRpcApiError, StarknetRpcResult}; +use alloy::primitives::TxHash; + +pub fn get_messages_status(starknet: &Starknet, transaction_hash: TxHash) -> StarknetRpcResult> { + let l1_handler_tx_hashes = starknet + .backend + .get_l1_handler_tx_hashes(transaction_hash) + .or_internal_server_error("Retrieving L1 handler transactions from database")?; + if l1_handler_tx_hashes.is_empty() { + return Err(StarknetRpcApiError::TxnHashNotFound); + } + l1_handler_tx_hashes.iter().try_fold( + Vec::with_capacity(l1_handler_tx_hashes.len()), + |mut acc, l1_handler_tx_hash| { + let finality_status = match get_transaction_status(starknet, *l1_handler_tx_hash) { + Ok(tx_status) => tx_status.finality_status, + Err(StarknetRpcApiError::TxnHashNotFound) => { + tracing::error!("L1 handler tx {l1_handler_tx_hash:?} for L1 tx {transaction_hash:?} not found"); + return Err(StarknetRpcApiError::InternalServerError); + } + Err(e) => { + tracing::error!( + "Failed to retrieve transaction status for L1 handler transaction {l1_handler_tx_hash:?} \ + related to L1 transaction {transaction_hash:?}: {e:?}" + ); + return Err(StarknetRpcApiError::InternalServerError); + } + }; + acc.push(MessageStatus { + transaction_hash: *l1_handler_tx_hash, + finality_status, + // TODO Update this once get_transaction_status supports rejections + failure_reason: 
None, + }); + Ok(acc) + }, + ) +} diff --git a/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/read/mod.rs b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/read/mod.rs index aa308dcea..d4f6fc5d7 100644 --- a/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/read/mod.rs +++ b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/read/mod.rs @@ -1,11 +1,15 @@ -use crate::versions::user::v0_8_0::{ContractStorageKeysItem, GetStorageProofResult, StarknetReadRpcApiV0_8_0Server}; +use crate::versions::user::v0_8_0::{ + ContractStorageKeysItem, GetStorageProofResult, MessageStatus, StarknetReadRpcApiV0_8_0Server, +}; use crate::Starknet; +use alloy::primitives::TxHash; use jsonrpsee::core::{async_trait, RpcResult}; use mp_block::BlockId; use mp_chain_config::RpcVersion; use starknet_types_core::felt::Felt; pub mod get_compiled_casm; +pub mod get_messages_status; pub mod get_storage_proof; #[async_trait] @@ -27,4 +31,8 @@ impl StarknetReadRpcApiV0_8_0Server for Starknet { ) -> RpcResult { get_storage_proof::get_storage_proof(self, block_id, class_hashes, contract_addresses, contracts_storage_keys) } + + fn get_messages_status(&self, transaction_hash: TxHash) -> RpcResult> { + Ok(get_messages_status::get_messages_status(self, transaction_hash)?) + } } diff --git a/crates/madara/node/src/cli/analytics.rs b/crates/madara/node/src/cli/analytics.rs index 02647cc07..095a33431 100644 --- a/crates/madara/node/src/cli/analytics.rs +++ b/crates/madara/node/src/cli/analytics.rs @@ -10,6 +10,6 @@ pub struct AnalyticsParams { pub analytics_service_name: String, /// Endpoint of the analytics server. - #[arg(env = "OTEL_EXPORTER_OTLP_ENDPOINT", long, value_parser = parse_url, default_value = None)] + #[arg(env = "MADARA_ANALYTICS_COLLECTION_ENDPOINT", long, value_parser = parse_url, default_value = None)] pub analytics_collection_endpoint: Option, }