diff --git a/CHANGELOG.md b/CHANGELOG.md index c2b1ea2d8..0b7908014 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ ## Next release +- feat(block_production): continue pending block on restart +- feat(mempool): save mempool transactions to db +- feat(mempool): mempool transaction limits - feat(cli): madaraup quickfix - feat(cli): added madaraup for v0.7.0 - refactor(rpc): replace starknet-rs by starknet-types-rpc diff --git a/Cargo.lock b/Cargo.lock index 6ad8f6ba9..bdb82c521 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5394,6 +5394,7 @@ dependencies = [ "jsonrpsee", "mc-analytics", "mc-block-import", + "mc-block-production", "mc-db", "mc-devnet", "mc-eth", @@ -5516,12 +5517,58 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "mc-block-production" +version = "0.7.0" +dependencies = [ + "anyhow", + "assert_matches", + "bitvec", + "blockifier", + "lazy_static", + "mc-analytics", + "mc-block-import", + "mc-db", + "mc-exec", + "mc-mempool", + "mockall", + "mp-block", + "mp-chain-config", + "mp-class", + "mp-convert", + "mp-receipt", + "mp-state-update", + "mp-transactions", + "mp-utils", + "once_cell", + "opentelemetry", + "opentelemetry-appender-tracing", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "opentelemetry-stdout", + "opentelemetry_sdk", + "proptest", + "proptest-derive", + "rstest 0.18.2", + "serde_json", + "starknet-core", + "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", + "starknet_api", + "thiserror 2.0.3", + "tokio", + "tracing", + "tracing-core", + "tracing-opentelemetry", + "tracing-subscriber", +] + [[package]] name = "mc-db" version = "0.7.0" dependencies = [ "anyhow", "bincode 1.3.3", + "blockifier", "bonsai-trie", "lazy_static", "librocksdb-sys", @@ -5562,6 +5609,7 @@ dependencies = [ "blockifier", "m-cairo-test-contracts", "mc-block-import", + "mc-block-production", "mc-db", "mc-mempool", "mockall", @@ -5671,6 +5719,8 @@ dependencies = [ "mp-chain-config", "mp-class", "mp-convert", + "mp-receipt", + "mp-transactions", "opentelemetry", "opentelemetry-appender-tracing", "opentelemetry-otlp", @@ -5787,6 +5837,7 @@ dependencies = [ "tracing-core", "tracing-opentelemetry", "tracing-subscriber", + "tracing-test", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1786898d3..c6a1d0b2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ members = [ "crates/proc-macros", "crates/tests", "crates/cairo-test-contracts", + "crates/client/block_production", ] resolver = "2" # Everything except test-related packages, so that they are not compiled when doing `cargo build`. @@ -127,6 +128,7 @@ mc-gateway-server = { path = "crates/client/gateway/server" } mc-sync = { path = "crates/client/sync" } mc-eth = { path = "crates/client/eth" } mc-mempool = { path = "crates/client/mempool" } +mc-block-production = { path = "crates/client/block_production" } mc-block-import = { path = "crates/client/block_import" } mc-devnet = { path = "crates/client/devnet" } @@ -236,6 +238,7 @@ tracing-subscriber = { version = "0.3.18", features = [ "registry", "std", ] } +tracing-test = "0.2.5" tracing-opentelemetry = "0.26.0" [patch.crates-io] diff --git a/configs/chain_config.example.yaml b/configs/chain_config.example.yaml index 0037c8d3b..5e4d35143 100644 --- a/configs/chain_config.example.yaml +++ b/configs/chain_config.example.yaml @@ -69,3 +69,10 @@ bouncer_config: # /!\ Only used for block production. # Address of the sequencer (0x0 for a full node).
sequencer_address: "0x0" + +# Transaction limit in the mempool. +mempool_tx_limit: 10000 +# Additional transaction limit in the mempool, applied to declare transactions only. +mempool_declare_tx_limit: 20 +# Max age of a transaction in the mempool. +mempool_tx_max_age: "5h" diff --git a/configs/presets/devnet.yaml b/configs/presets/devnet.yaml index 2ef5ab76d..00a580175 100644 --- a/configs/presets/devnet.yaml +++ b/configs/presets/devnet.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x123" eth_core_contract_address: "0xe7f1725e7734ce288f8367e1bb143e90bb3f0512" eth_gps_statement_verifier: "0xf294781D719D2F4169cE54469C28908E6FA752C1" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/configs/presets/integration.yaml b/configs/presets/integration.yaml index 981a5ae14..a1ecbdc42 100644 --- a/configs/presets/integration.yaml +++ b/configs/presets/integration.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8" eth_core_contract_address: "0x4737c0c1B4D5b1A687B42610DdabEE781152359c" eth_gps_statement_verifier: "0x2046B966994Adcb88D83f467a41b75d64C2a619F" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/configs/presets/mainnet.yaml b/configs/presets/mainnet.yaml index 716b6a85a..1d0323598 100644 --- a/configs/presets/mainnet.yaml +++ b/configs/presets/mainnet.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8" eth_core_contract_address: "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4" eth_gps_statement_verifier: "0x47312450B3Ac8b5b8e247a6bB6d523e7605bDb60" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/configs/presets/sepolia.yaml b/configs/presets/sepolia.yaml index f4aded9cf..da1b25f0b 100644 --- a/configs/presets/sepolia.yaml +++ b/configs/presets/sepolia.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8" eth_core_contract_address: "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057" eth_gps_statement_verifier: "0xf294781D719D2F4169cE54469C28908E6FA752C1" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/crates/client/block_import/src/pre_validate.rs b/crates/client/block_import/src/pre_validate.rs index 2556b96c3..ac423e392 100644 --- a/crates/client/block_import/src/pre_validate.rs +++ b/crates/client/block_import/src/pre_validate.rs @@ -77,6 +77,7 @@ pub fn pre_validate_inner( unverified_global_state_root: block.commitments.global_state_root, unverified_block_hash: block.commitments.block_hash, unverified_block_number: block.unverified_block_number, + visited_segments: block.visited_segments, }) } @@ -97,6 +98,7 @@ pub fn pre_validate_pending_inner( state_diff: block.state_diff, receipts: block.receipts, converted_classes, + visited_segments: block.visited_segments, }) } diff --git a/crates/client/block_import/src/tests/block_import_utils.rs b/crates/client/block_import/src/tests/block_import_utils.rs index 07d28dbd3..9de8d8e3b 100644 --- a/crates/client/block_import/src/tests/block_import_utils.rs +++ b/crates/client/block_import/src/tests/block_import_utils.rs @@ -104,6 +104,7 @@ pub fn create_dummy_block() -> PreValidatedBlock { receipts: vec![], state_diff: StateDiff::default(), converted_classes: Default::default(), + visited_segments: None, } } @@ -128,6 +129,7 @@ pub fn
create_dummy_unverified_full_block() -> UnverifiedFullBlock { declared_classes: vec![], commitments: UnverifiedCommitments::default(), trusted_converted_classes: vec![], + visited_segments: None, } } @@ -151,5 +153,6 @@ pub fn create_dummy_pending_block() -> PreValidatedPendingBlock { receipts: vec![], state_diff: StateDiff::default(), converted_classes: Default::default(), + visited_segments: None, } } diff --git a/crates/client/block_import/src/types.rs b/crates/client/block_import/src/types.rs index 3e0419a57..462169220 100644 --- a/crates/client/block_import/src/types.rs +++ b/crates/client/block_import/src/types.rs @@ -3,7 +3,7 @@ use mp_block::{ header::{GasPrices, L1DataAvailabilityMode}, - Header, + Header, VisitedSegments, }; use mp_chain_config::StarknetVersion; use mp_class::{ @@ -142,13 +142,14 @@ pub struct UnverifiedCommitments { } /// An unverified pending full block as input for the block import pipeline. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Eq, PartialEq, Default, Serialize, Deserialize)] pub struct UnverifiedPendingFullBlock { pub header: UnverifiedHeader, pub state_diff: StateDiff, pub transactions: Vec, pub receipts: Vec, pub declared_classes: Vec, + pub visited_segments: Option, } /// An unverified full block as input for the block import pipeline. @@ -165,6 +166,7 @@ pub struct UnverifiedFullBlock { #[serde(skip)] pub trusted_converted_classes: Vec, pub commitments: UnverifiedCommitments, + pub visited_segments: Option, } // Pre-validate outputs. @@ -192,6 +194,7 @@ pub struct PreValidatedBlock { pub unverified_global_state_root: Option, pub unverified_block_hash: Option, pub unverified_block_number: Option, + pub visited_segments: Option, } /// Output of the [`crate::pre_validate`] step. @@ -202,6 +205,7 @@ pub struct PreValidatedPendingBlock { pub state_diff: StateDiff, pub receipts: Vec, pub converted_classes: Vec, + pub visited_segments: Option, } // Verify-apply output. 
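The new `visited_segments` field threads through the block-import input types and the pre-validate outputs above. As a rough sketch (illustrative only, not part of this diff) of how a producer can attach the segments recorded during execution, with all other fields elided to their defaults (`UnverifiedFullBlock` implements `Default`, as `close_block` below relies on):

use mc_block_import::{UnverifiedFullBlock, UnverifiedHeader};
use mp_block::VisitedSegments;

fn block_for_import(header: UnverifiedHeader, segments: VisitedSegments) -> UnverifiedFullBlock {
    UnverifiedFullBlock {
        header,
        // Transactions, receipts, state diff and declared classes would be
        // filled in from the execution output.
        visited_segments: Some(segments),
        // Commitments are computed by the block importer, so they can stay
        // at their default here (same pattern as in `close_block`).
        ..Default::default()
    }
}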
diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index 9113b88ac..f830d23ab 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -101,6 +101,8 @@ pub fn verify_apply_inner( }, block.state_diff, block.converted_classes, + block.visited_segments, + None, ) .map_err(make_db_error("storing block in db"))?; @@ -144,6 +146,8 @@ pub fn verify_apply_pending_inner( }, block.state_diff, block.converted_classes, + block.visited_segments, + None, ) .map_err(make_db_error("storing block in db"))?; @@ -407,7 +411,7 @@ mod verify_apply_tests { if populate_db { let header = create_dummy_header(); let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); } // Create a validation context with the specified ignore_block_order flag @@ -661,7 +665,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -687,7 +691,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -723,7 +727,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -769,7 +773,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); diff --git a/crates/client/block_production/Cargo.toml b/crates/client/block_production/Cargo.toml new file mode 100644 index 000000000..ca4311a1a --- /dev/null +++ b/crates/client/block_production/Cargo.toml @@ -0,0 +1,78 @@ +[package] +name = "mc-block-production" +description = "Madara client block production service" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +version.workspace = true +homepage.workspace = true + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] + +rstest = { workspace = true } +mc-db = { workspace = true, features = ["testing"] } +tokio = { workspace = true, features = ["rt-multi-thread"] } +proptest.workspace = true +proptest-derive.workspace = true 
+bitvec.workspace = true +blockifier = { workspace = true, features = ["testing"] } +mockall.workspace = true +assert_matches.workspace = true +lazy_static.workspace = true +serde_json.workspace = true + +[features] +testing = ["blockifier/testing", "mc-db/testing", "mockall"] + +[dependencies] + +# Madara +mc-analytics.workspace = true +mc-block-import.workspace = true +mc-db.workspace = true +mc-exec.workspace = true +mc-mempool.workspace = true +mp-block.workspace = true +mp-chain-config.workspace = true +mp-class.workspace = true +mp-convert.workspace = true +mp-receipt.workspace = true +mp-state-update.workspace = true +mp-transactions.workspace = true +mp-utils.workspace = true + +# Starknet +blockifier.workspace = true +starknet-core.workspace = true +starknet-types-core.workspace = true +starknet_api.workspace = true + +# Other +anyhow.workspace = true +mockall = { workspace = true, optional = true } +thiserror.workspace = true +tokio.workspace = true + +# Instrumentation +once_cell = { workspace = true } +opentelemetry = { workspace = true, features = ["metrics", "logs"] } +opentelemetry-appender-tracing = { workspace = true, default-features = false } +opentelemetry-otlp = { workspace = true, features = [ + "tonic", + "metrics", + "logs", +] } +opentelemetry-semantic-conventions = { workspace = true } +opentelemetry-stdout = { workspace = true } +opentelemetry_sdk = { workspace = true, features = ["rt-tokio", "logs"] } +tracing = { workspace = true } +tracing-core = { workspace = true, default-features = false } +tracing-opentelemetry = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/client/mempool/src/close_block.rs b/crates/client/block_production/src/close_block.rs similarity index 94% rename from crates/client/mempool/src/close_block.rs rename to crates/client/block_production/src/close_block.rs index c828c7b86..ffc520497 100644 --- a/crates/client/mempool/src/close_block.rs +++ b/crates/client/block_production/src/close_block.rs @@ -1,7 +1,7 @@ use mc_block_import::{ BlockImportError, BlockImportResult, BlockImporter, BlockValidationContext, UnverifiedFullBlock, UnverifiedHeader, }; -use mp_block::{header::PendingHeader, MadaraPendingBlock, MadaraPendingBlockInfo}; +use mp_block::{header::PendingHeader, MadaraPendingBlock, MadaraPendingBlockInfo, VisitedSegments}; use mp_class::ConvertedClass; use mp_state_update::StateDiff; use starknet_api::core::ChainId; @@ -15,6 +15,7 @@ pub async fn close_block( chain_id: ChainId, block_number: u64, declared_classes: Vec, + visited_segments: VisitedSegments, ) -> Result { let validation = BlockValidationContext::new(chain_id).trust_transaction_hashes(true); @@ -48,6 +49,7 @@ pub async fn close_block( receipts: inner.receipts, trusted_converted_classes: declared_classes, commitments: Default::default(), // the block importer will compute the commitments for us + visited_segments: Some(visited_segments), ..Default::default() }, validation.clone(), diff --git a/crates/client/block_production/src/finalize_execution_state.rs b/crates/client/block_production/src/finalize_execution_state.rs new file mode 100644 index 000000000..601933e30 --- /dev/null +++ b/crates/client/block_production/src/finalize_execution_state.rs @@ -0,0 +1,320 @@ +use crate::Error; +use blockifier::{ + blockifier::transaction_executor::{TransactionExecutor, BLOCK_STATE_ACCESS_ERR}, + bouncer::BouncerWeights, + state::{cached_state::StateMaps, state_api::StateReader}, + 
transaction::errors::TransactionExecutionError, +}; +use mc_db::{db_block_id::DbBlockId, MadaraBackend}; +use mp_block::{VisitedSegmentEntry, VisitedSegments}; +use mp_convert::ToFelt; +use mp_state_update::{ + ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, + StorageEntry, +}; +use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce}; +use std::collections::{hash_map, HashMap}; + +#[derive(Debug, thiserror::Error)] +#[error("Error converting state diff to state map")] +pub struct StateDiffToStateMapError; + +pub fn state_diff_to_state_map(diff: StateDiff) -> Result { + let nonces = diff + .nonces + .into_iter() + .map(|entry| Ok((entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, Nonce(entry.nonce)))) + .collect::>()?; + let class_hashes = diff + .deployed_contracts + .into_iter() + .map(|entry| Ok((entry.address.try_into().map_err(|_| StateDiffToStateMapError)?, ClassHash(entry.class_hash)))) + .chain(diff.replaced_classes.into_iter().map(|entry| { + Ok((entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, ClassHash(entry.class_hash))) + })) + .collect::>()?; + let storage = diff + .storage_diffs + .into_iter() + .flat_map(|d| { + d.storage_entries.into_iter().map(move |e| { + Ok(( + ( + d.address.try_into().map_err(|_| StateDiffToStateMapError)?, + e.key.try_into().map_err(|_| StateDiffToStateMapError)?, + ), + e.value, + )) + }) + }) + .collect::>()?; + let declared_contracts = diff.declared_classes.iter().map(|d| (ClassHash(d.class_hash), true)).collect(); + let compiled_class_hashes = diff + .declared_classes + .into_iter() + .map(|d| (ClassHash(d.class_hash), CompiledClassHash(d.compiled_class_hash))) + .collect(); + Ok(StateMaps { nonces, class_hashes, storage, declared_contracts, compiled_class_hashes }) +} + +pub(crate) fn state_map_to_state_diff( + backend: &MadaraBackend, + on_top_of: &Option, + diff: StateMaps, +) -> Result { + let mut backing_map = HashMap::::default(); + let mut storage_diffs = Vec::::default(); + for ((address, key), value) in diff.storage { + match backing_map.entry(address) { + hash_map::Entry::Vacant(e) => { + e.insert(storage_diffs.len()); + storage_diffs.push(ContractStorageDiffItem { + address: address.to_felt(), + storage_entries: vec![StorageEntry { key: key.to_felt(), value }], + }); + } + hash_map::Entry::Occupied(e) => { + storage_diffs[*e.get()].storage_entries.push(StorageEntry { key: key.to_felt(), value }); + } + } + } + + let mut deprecated_declared_classes = Vec::default(); + for (class_hash, _) in diff.declared_contracts { + if !diff.compiled_class_hashes.contains_key(&class_hash) { + deprecated_declared_classes.push(class_hash.to_felt()); + } + } + + let declared_classes = diff + .compiled_class_hashes + .iter() + .map(|(class_hash, compiled_class_hash)| DeclaredClassItem { + class_hash: class_hash.to_felt(), + compiled_class_hash: compiled_class_hash.to_felt(), + }) + .collect(); + + let nonces = diff + .nonces + .into_iter() + .map(|(contract_address, nonce)| NonceUpdate { + contract_address: contract_address.to_felt(), + nonce: nonce.to_felt(), + }) + .collect(); + + let mut deployed_contracts = Vec::new(); + let mut replaced_classes = Vec::new(); + for (contract_address, new_class_hash) in diff.class_hashes { + let replaced = if let Some(on_top_of) = on_top_of { + backend.get_contract_class_hash_at(on_top_of, &contract_address.to_felt())?.is_some() + } else { + // Executing genesis block: nothing being 
redefined here + false + }; + if replaced { + replaced_classes.push(ReplacedClassItem { + contract_address: contract_address.to_felt(), + class_hash: new_class_hash.to_felt(), + }) + } else { + deployed_contracts.push(DeployedContractItem { + address: contract_address.to_felt(), + class_hash: new_class_hash.to_felt(), + }) + } + } + + Ok(StateDiff { + storage_diffs, + deprecated_declared_classes, + declared_classes, + nonces, + deployed_contracts, + replaced_classes, + }) +} + +fn get_visited_segments(tx_executor: &mut TransactionExecutor) -> Result { + let visited_segments = tx_executor + .block_state + .as_ref() + .expect(BLOCK_STATE_ACCESS_ERR) + .visited_pcs + .iter() + .map(|(class_hash, class_visited_pcs)| -> Result<_, Error> { + let contract_class = tx_executor + .block_state + .as_ref() + .expect(BLOCK_STATE_ACCESS_ERR) + .get_compiled_contract_class(*class_hash) + .map_err(TransactionExecutionError::StateError)?; + Ok(VisitedSegmentEntry { + class_hash: class_hash.to_felt(), + segments: contract_class.get_visited_segments(class_visited_pcs)?, + }) + }) + .collect::>()?; + + Ok(VisitedSegments(visited_segments)) +} + +pub(crate) fn finalize_execution_state( + tx_executor: &mut TransactionExecutor, + backend: &MadaraBackend, + on_top_of: &Option, +) -> Result<(StateDiff, VisitedSegments, BouncerWeights), Error> { + let state_map = tx_executor + .block_state + .as_mut() + .expect(BLOCK_STATE_ACCESS_ERR) + .to_state_diff() + .map_err(TransactionExecutionError::StateError)?; + let state_update = state_map_to_state_diff(backend, on_top_of, state_map)?; + + let visited_segments = get_visited_segments(tx_executor)?; + + Ok((state_update, visited_segments, *tx_executor.bouncer.get_accumulated_weights())) +} + +#[cfg(test)] +mod test { + use std::{collections::HashMap, sync::Arc}; + + use blockifier::{compiled_class_hash, nonce, state::cached_state::StateMaps, storage_key}; + use mc_db::MadaraBackend; + use mp_chain_config::ChainConfig; + use mp_convert::ToFelt; + use mp_state_update::{ + ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, StateDiff, StorageEntry, + }; + use starknet_api::{ + class_hash, contract_address, + core::{ClassHash, ContractAddress, PatriciaKey}, + felt, patricia_key, + }; + use starknet_core::types::Felt; + + #[test] + fn state_map_to_state_diff() { + let backend = MadaraBackend::open_for_testing(Arc::new(ChainConfig::madara_test())); + + let mut nonces = HashMap::new(); + nonces.insert(contract_address!(1u32), nonce!(1)); + nonces.insert(contract_address!(2u32), nonce!(2)); + nonces.insert(contract_address!(3u32), nonce!(3)); + + let mut class_hashes = HashMap::new(); + class_hashes.insert(contract_address!(1u32), class_hash!("0xc1a551")); + class_hashes.insert(contract_address!(2u32), class_hash!("0xc1a552")); + class_hashes.insert(contract_address!(3u32), class_hash!("0xc1a553")); + + let mut storage = HashMap::new(); + storage.insert((contract_address!(1u32), storage_key!(1u32)), felt!(1u32)); + storage.insert((contract_address!(1u32), storage_key!(2u32)), felt!(2u32)); + storage.insert((contract_address!(1u32), storage_key!(3u32)), felt!(3u32)); + + storage.insert((contract_address!(2u32), storage_key!(1u32)), felt!(1u32)); + storage.insert((contract_address!(2u32), storage_key!(2u32)), felt!(2u32)); + storage.insert((contract_address!(2u32), storage_key!(3u32)), felt!(3u32)); + + storage.insert((contract_address!(3u32), storage_key!(1u32)), felt!(1u32)); + storage.insert((contract_address!(3u32), storage_key!(2u32)), felt!(2u32)); 
+ storage.insert((contract_address!(3u32), storage_key!(3u32)), felt!(3u32)); + + let mut compiled_class_hashes = HashMap::new(); + // "0xc1a553" is marked as deprecated by not having a compiled + // class hash + compiled_class_hashes.insert(class_hash!("0xc1a551"), compiled_class_hash!(0x1)); + compiled_class_hashes.insert(class_hash!("0xc1a552"), compiled_class_hash!(0x2)); + + let mut declared_contracts = HashMap::new(); + declared_contracts.insert(class_hash!("0xc1a551"), true); + declared_contracts.insert(class_hash!("0xc1a552"), true); + declared_contracts.insert(class_hash!("0xc1a553"), true); + + let state_map = StateMaps { nonces, class_hashes, storage, compiled_class_hashes, declared_contracts }; + + let storage_diffs = vec![ + ContractStorageDiffItem { + address: felt!(1u32), + storage_entries: vec![ + StorageEntry { key: felt!(1u32), value: Felt::ONE }, + StorageEntry { key: felt!(2u32), value: Felt::TWO }, + StorageEntry { key: felt!(3u32), value: Felt::THREE }, + ], + }, + ContractStorageDiffItem { + address: felt!(2u32), + storage_entries: vec![ + StorageEntry { key: felt!(1u32), value: Felt::ONE }, + StorageEntry { key: felt!(2u32), value: Felt::TWO }, + StorageEntry { key: felt!(3u32), value: Felt::THREE }, + ], + }, + ContractStorageDiffItem { + address: felt!(3u32), + storage_entries: vec![ + StorageEntry { key: felt!(1u32), value: Felt::ONE }, + StorageEntry { key: felt!(2u32), value: Felt::TWO }, + StorageEntry { key: felt!(3u32), value: Felt::THREE }, + ], + }, + ]; + + let deprecated_declared_classes = vec![class_hash!("0xc1a553").to_felt()]; + + let declared_classes = vec![ + DeclaredClassItem { + class_hash: class_hash!("0xc1a551").to_felt(), + compiled_class_hash: compiled_class_hash!(0x1).to_felt(), + }, + DeclaredClassItem { + class_hash: class_hash!("0xc1a552").to_felt(), + compiled_class_hash: compiled_class_hash!(0x2).to_felt(), + }, + ]; + + let nonces = vec![ + NonceUpdate { contract_address: felt!(1u32), nonce: felt!(1u32) }, + NonceUpdate { contract_address: felt!(2u32), nonce: felt!(2u32) }, + NonceUpdate { contract_address: felt!(3u32), nonce: felt!(3u32) }, + ]; + + let deployed_contracts = vec![ + DeployedContractItem { address: felt!(1u32), class_hash: class_hash!("0xc1a551").to_felt() }, + DeployedContractItem { address: felt!(2u32), class_hash: class_hash!("0xc1a552").to_felt() }, + DeployedContractItem { address: felt!(3u32), class_hash: class_hash!("0xc1a553").to_felt() }, + ]; + + let replaced_classes = vec![]; + + let expected = StateDiff { + storage_diffs, + deprecated_declared_classes, + declared_classes, + nonces, + deployed_contracts, + replaced_classes, + }; + + let mut actual = super::state_map_to_state_diff(&backend, &Option::<_>::None, state_map).unwrap(); + + actual.storage_diffs.sort_by(|a, b| a.address.cmp(&b.address)); + actual.storage_diffs.iter_mut().for_each(|s| s.storage_entries.sort_by(|a, b| a.key.cmp(&b.key))); + actual.deprecated_declared_classes.sort(); + actual.declared_classes.sort_by(|a, b| a.class_hash.cmp(&b.class_hash)); + actual.nonces.sort_by(|a, b| a.contract_address.cmp(&b.contract_address)); + actual.deployed_contracts.sort_by(|a, b| a.address.cmp(&b.address)); + actual.replaced_classes.sort_by(|a, b| a.contract_address.cmp(&b.contract_address)); + + assert_eq!( + actual, + expected, + "actual: {}\nexpected: {}", + serde_json::to_string_pretty(&actual).unwrap_or_default(), + serde_json::to_string_pretty(&expected).unwrap_or_default() + ); + } +} diff --git a/crates/client/mempool/src/block_production.rs
b/crates/client/block_production/src/lib.rs similarity index 76% rename from crates/client/mempool/src/block_production.rs rename to crates/client/block_production/src/lib.rs index e80ceaabe..e7d310b83 100644 --- a/crates/client/mempool/src/block_production.rs +++ b/crates/client/block_production/src/lib.rs @@ -1,41 +1,54 @@ -// TODO: Move this into its own crate. +//! Block production service. +//! +//! # Testing +//! +//! Testing is done in a few places: +//! - devnet has a few tests for declare transactions and basic transfers as of now. This is probably +//! the simplest place where we could add more tests about block-time, mempool saving to db and such. +//! - e2e tests cover a few transactions, with the rpc/gateway in scope too. +//! - js-tests in the CI, though not very in-depth +//! - at a higher level, block production is more heavily tested (currently outside of the CI) by running the +//! bootstrapper and the kakarot test suite. This is the only place where L1-L2 messaging is really tested +//! as of now. We should make better tests around this area. +//! +//! There are no tests in this crate because they would require a proper genesis block. Devnet provides that, +//! so that's where block-production integration tests are the simplest to add. +//! L1-L2 testing is a bit harder to set up, but we should definitely make the testing more comprehensive here. -use crate::block_production_metrics::BlockProductionMetrics; use crate::close_block::close_block; -use crate::header::make_pending_header; -use crate::{L1DataProvider, MempoolProvider, MempoolTransaction}; -use blockifier::blockifier::transaction_executor::{TransactionExecutor, VisitedSegmentsMapping}; -use blockifier::bouncer::{Bouncer, BouncerWeights, BuiltinCount}; -use blockifier::state::cached_state::StateMaps; -use blockifier::state::state_api::StateReader; +use crate::metrics::BlockProductionMetrics; +use blockifier::blockifier::transaction_executor::{TransactionExecutor, BLOCK_STATE_ACCESS_ERR}; +use blockifier::bouncer::{BouncerWeights, BuiltinCount}; +use blockifier::state::state_api::UpdatableState; use blockifier::transaction::errors::TransactionExecutionError; +use finalize_execution_state::{state_diff_to_state_map, StateDiffToStateMapError}; use mc_block_import::{BlockImportError, BlockImporter}; use mc_db::db_block_id::DbBlockId; use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::{BlockifierStateAdapter, ExecutionContext}; -use mp_block::{BlockId, BlockTag, MadaraPendingBlock}; -use mp_class::ConvertedClass; +use mc_mempool::header::make_pending_header; +use mc_mempool::{L1DataProvider, MempoolProvider}; +use mp_block::{BlockId, BlockTag, MadaraMaybePendingBlockInfo, MadaraPendingBlock, VisitedSegments}; +use mp_class::compile::ClassCompilationError; +use mp_class::{ConvertedClass, LegacyConvertedClass, SierraConvertedClass}; use mp_convert::ToFelt; use mp_receipt::from_blockifier_execution_info; -use mp_state_update::{ - ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, - StorageEntry, -}; +use mp_state_update::{ContractStorageDiffItem, StateDiff, StorageEntry}; use mp_transactions::TransactionWithHash; use mp_utils::graceful_shutdown; use mp_utils::service::ServiceContext; use opentelemetry::KeyValue; -use starknet_api::core::ContractAddress; +use starknet_api::core::ClassHash; use starknet_types_core::felt::Felt; use std::borrow::Cow; -use std::collections::hash_map; -use std::collections::HashMap; use std::collections::VecDeque; use std::mem; use
std::sync::Arc; use std::time::Instant; -use crate::clone_transaction; +mod close_block; +mod finalize_execution_state; +pub mod metrics; #[derive(Default, Clone)] struct ContinueBlockStats { @@ -61,130 +74,11 @@ pub enum Error { Import(#[from] mc_block_import::BlockImportError), #[error("Unexpected error: {0:#}")] Unexpected(Cow<'static, str>), + #[error("Class compilation error when continuing the pending block: {0:#}")] + PendingClassCompilationError(#[from] ClassCompilationError), + #[error("State diff error when continuing the pending block: {0:#}")] + PendingStateDiff(#[from] StateDiffToStateMapError), } - -fn state_map_to_state_diff( - backend: &MadaraBackend, - on_top_of: &Option, - diff: StateMaps, -) -> Result { - let mut backing_map = HashMap::::default(); - let mut storage_diffs = Vec::::default(); - for ((address, key), value) in diff.storage { - match backing_map.entry(address) { - hash_map::Entry::Vacant(e) => { - e.insert(storage_diffs.len()); - storage_diffs.push(ContractStorageDiffItem { - address: address.to_felt(), - storage_entries: vec![StorageEntry { key: key.to_felt(), value }], - }); - } - hash_map::Entry::Occupied(e) => { - storage_diffs[*e.get()].storage_entries.push(StorageEntry { key: key.to_felt(), value }); - } - } - } - - let mut deprecated_declared_classes = Vec::default(); - for (class_hash, _) in diff.declared_contracts { - if !diff.compiled_class_hashes.contains_key(&class_hash) { - deprecated_declared_classes.push(class_hash.to_felt()); - } - } - - let declared_classes = diff - .compiled_class_hashes - .iter() - .map(|(class_hash, compiled_class_hash)| DeclaredClassItem { - class_hash: class_hash.to_felt(), - compiled_class_hash: compiled_class_hash.to_felt(), - }) - .collect(); - - let nonces = diff - .nonces - .into_iter() - .map(|(contract_address, nonce)| NonceUpdate { - contract_address: contract_address.to_felt(), - nonce: nonce.to_felt(), - }) - .collect(); - - let mut deployed_contracts = Vec::new(); - let mut replaced_classes = Vec::new(); - for (contract_address, new_class_hash) in diff.class_hashes { - let replaced = if let Some(on_top_of) = on_top_of { - backend.get_contract_class_hash_at(on_top_of, &contract_address.to_felt())?.is_some() - } else { - // Executing genesis block: nothing being redefined here - false - }; - if replaced { - replaced_classes.push(ReplacedClassItem { - contract_address: contract_address.to_felt(), - class_hash: new_class_hash.to_felt(), - }) - } else { - deployed_contracts.push(DeployedContractItem { - address: contract_address.to_felt(), - class_hash: new_class_hash.to_felt(), - }) - } - } - - Ok(StateDiff { - storage_diffs, - deprecated_declared_classes, - declared_classes, - nonces, - deployed_contracts, - replaced_classes, - }) -} - -pub const BLOCK_STATE_ACCESS_ERR: &str = "Error: The block state should be `Some`."; -fn get_visited_segments( - tx_executor: &mut TransactionExecutor, -) -> Result { - let visited_segments = tx_executor - .block_state - .as_ref() - .expect(BLOCK_STATE_ACCESS_ERR) - .visited_pcs - .iter() - .map(|(class_hash, class_visited_pcs)| -> Result<_, Error> { - let contract_class = tx_executor - .block_state - .as_ref() - .expect(BLOCK_STATE_ACCESS_ERR) - .get_compiled_contract_class(*class_hash) - .map_err(TransactionExecutionError::StateError)?; - Ok((*class_hash, contract_class.get_visited_segments(class_visited_pcs)?)) - }) - .collect::>()?; - - Ok(visited_segments) -} - -fn finalize_execution_state( - _executed_txs: &[MempoolTransaction], - tx_executor: &mut TransactionExecutor, - 
backend: &MadaraBackend, - on_top_of: &Option, -) -> Result<(StateDiff, VisitedSegmentsMapping, BouncerWeights), Error> { - let state_map = tx_executor - .block_state - .as_mut() - .expect(BLOCK_STATE_ACCESS_ERR) - .to_state_diff() - .map_err(TransactionExecutionError::StateError)?; - let state_update = state_map_to_state_diff(backend, on_top_of, state_map)?; - - let visited_segments = get_visited_segments(tx_executor)?; - - Ok((state_update, visited_segments, *tx_executor.bouncer.get_accumulated_weights())) -} - /// The block production task consumes transactions from the mempool in batches. /// This is to allow optimistic concurrency. However, the block may get full during batch execution, /// and we need to re-add the transactions back into the mempool. @@ -210,10 +104,6 @@ impl BlockProductionTask { self.current_pending_tick = n; } - #[tracing::instrument( - skip(backend, importer, mempool, l1_data_provider, metrics), - fields(module = "BlockProductionTask") - )] pub fn new( backend: Arc, importer: Arc, @@ -221,24 +111,78 @@ impl BlockProductionTask { metrics: BlockProductionMetrics, l1_data_provider: Arc, ) -> Result { - let parent_block_hash = backend - .get_block_hash(&BlockId::Tag(BlockTag::Latest))? - .unwrap_or(/* genesis block's parent hash */ Felt::ZERO); - let pending_block = MadaraPendingBlock::new_empty(make_pending_header( - parent_block_hash, - backend.chain_config(), - l1_data_provider.as_ref(), - )); - // NB: we cannot continue a previously started pending block yet. - // let pending_block = backend.get_or_create_pending_block(|| CreatePendingBlockExtraInfo { - // l1_gas_price: l1_data_provider.get_gas_prices(), - // l1_da_mode: l1_data_provider.get_da_mode(), - // })?; + let (pending_block, state_diff, pcs) = match backend.get_block(&DbBlockId::Pending)? { + Some(pending) => { + let MadaraMaybePendingBlockInfo::Pending(info) = pending.info else { + return Err(Error::Unexpected("Expected a pending block".into())); + }; + let pending_state_update = backend.get_pending_block_state_update()?; + (MadaraPendingBlock { info, inner: pending.inner }, pending_state_update, Default::default()) + } + None => { + let parent_block_hash = backend + .get_block_hash(&BlockId::Tag(BlockTag::Latest))?
+ .unwrap_or(/* genesis block's parent hash */ Felt::ZERO); + + ( + MadaraPendingBlock::new_empty(make_pending_header( + parent_block_hash, + backend.chain_config(), + l1_data_provider.as_ref(), + )), + StateDiff::default(), + Default::default(), + ) + } + }; + + let declared_classes: Vec = state_diff + .declared_classes + .iter() + .map(|item| { + let class_info = backend.get_class_info(&DbBlockId::Pending, &item.class_hash)?.ok_or_else(|| { + Error::Unexpected(format!("No class info for declared class {:#x}", item.class_hash).into()) + })?; + let converted_class = match class_info { + mp_class::ClassInfo::Sierra(info) => { + let compiled = + backend.get_sierra_compiled(&DbBlockId::Pending, &item.class_hash)?.ok_or_else(|| { + Error::Unexpected( + format!("No compiled class for declared class {:#x}", item.class_hash).into(), + ) + })?; + let compiled = Arc::new(compiled); + ConvertedClass::Sierra(SierraConvertedClass { class_hash: item.class_hash, info, compiled }) + } + mp_class::ClassInfo::Legacy(info) => { + ConvertedClass::Legacy(LegacyConvertedClass { class_hash: item.class_hash, info }) + } + }; + + Ok(converted_class) + }) + .collect::>()?; + + let class_hash_to_class = declared_classes + .iter() + .map(|c| { + Ok(( + ClassHash(c.class_hash()), + match c { + ConvertedClass::Legacy(class) => class.info.contract_class.to_blockifier_class()?, + ConvertedClass::Sierra(class) => class.compiled.to_blockifier_class()?, + }, + )) + }) + .collect::>()?; + let mut executor = ExecutionContext::new_in_block(Arc::clone(&backend), &pending_block.info.clone().into())?.tx_executor(); + let block_state = + executor.block_state.as_mut().expect("Block state can not be None unless we take ownership of it"); - let bouncer_config = backend.chain_config().bouncer_config.clone(); - executor.bouncer = Bouncer::new(bouncer_config); + // Apply pending state + block_state.apply_writes(&state_diff_to_state_map(state_diff)?, &class_hash_to_class, &pcs); Ok(Self { importer, @@ -247,14 +191,17 @@ impl BlockProductionTask { executor, current_pending_tick: 0, block: pending_block, - declared_classes: vec![], + declared_classes, l1_data_provider, metrics, }) } #[tracing::instrument(skip(self), fields(module = "BlockProductionTask"))] - fn continue_block(&mut self, bouncer_cap: BouncerWeights) -> Result<(StateDiff, ContinueBlockStats), Error> { + fn continue_block( + &mut self, + bouncer_cap: BouncerWeights, + ) -> Result<(StateDiff, VisitedSegments, BouncerWeights, ContinueBlockStats), Error> { let mut stats = ContinueBlockStats::default(); self.executor.bouncer.bouncer_config.block_max_capacity = bouncer_cap; @@ -265,6 +212,8 @@ impl BlockProductionTask { // This does not need to be outside the loop, but that saves an allocation let mut executed_txs = Vec::with_capacity(batch_size); + // Cloning transactions: That's a lot of cloning, but we're kind of forced to do that because blockifier takes + // a `&[Transaction]` slice. In addition, declare transactions have their class behind an Arc. loop { // Take transactions from mempool. 
let to_take = batch_size.saturating_sub(txs_to_process.len()); @@ -272,8 +221,7 @@ impl BlockProductionTask { if to_take > 0 { self.mempool.take_txs_chunk(/* extend */ &mut txs_to_process, batch_size); - txs_to_process_blockifier - .extend(txs_to_process.iter().skip(cur_len).map(|tx| clone_transaction(&tx.tx))); + txs_to_process_blockifier.extend(txs_to_process.iter().skip(cur_len).map(|tx| tx.clone_tx())); } if txs_to_process.is_empty() { @@ -293,6 +241,10 @@ impl BlockProductionTask { for exec_result in all_results { let mut mempool_tx = txs_to_process.pop_front().ok_or_else(|| Error::Unexpected("Vector length mismatch".into()))?; + + // Remove tx from mempool + self.backend.remove_mempool_transaction(&mempool_tx.tx_hash().to_felt())?; + match exec_result { Ok(execution_info) => { // Reverted transactions appear here as Ok too. @@ -310,8 +262,8 @@ impl BlockProductionTask { self.block .inner .receipts - .push(from_blockifier_execution_info(&execution_info, &clone_transaction(&mempool_tx.tx))); - let converted_tx = TransactionWithHash::from(clone_transaction(&mempool_tx.tx)); // TODO: too many tx clones! + .push(from_blockifier_execution_info(&execution_info, &mempool_tx.clone_tx())); + let converted_tx = TransactionWithHash::from(mempool_tx.clone_tx()); self.block.info.tx_hashes.push(converted_tx.hash); self.block.inner.transactions.push(converted_tx.transaction); } @@ -336,20 +288,14 @@ impl BlockProductionTask { } } - // Add back the unexecuted transactions to the mempool. - stats.n_re_added_to_mempool = txs_to_process.len(); - self.mempool.re_add_txs(txs_to_process); + let on_top_of = self.executor.block_state.as_ref().expect(BLOCK_STATE_ACCESS_ERR).state.on_top_of_block_id; - let on_top_of = self - .executor - .block_state - .as_ref() - .expect("Block state can not be None unless we take ownership of it") - .state - .on_top_of_block_id; + let (state_diff, visited_segments, bouncer_weights) = + finalize_execution_state::finalize_execution_state(&mut self.executor, &self.backend, &on_top_of)?; - let (state_diff, _visited_segments, _weights) = - finalize_execution_state(&executed_txs, &mut self.executor, &self.backend, &on_top_of)?; + // Add back the unexecuted transactions to the mempool. + stats.n_re_added_to_mempool = txs_to_process.len(); + self.mempool.re_add_txs(txs_to_process, executed_txs); tracing::debug!( "Finished tick with {} new transactions, now at {} - re-adding {} txs to mempool", @@ -358,7 +304,7 @@ impl BlockProductionTask { stats.n_re_added_to_mempool ); - Ok((state_diff, stats)) + Ok((state_diff, visited_segments, bouncer_weights, stats)) } /// Each "tick" of the block time updates the pending block but only with the appropriate fraction of the total bouncer capacity. @@ -405,7 +351,7 @@ impl BlockProductionTask { }; let start_time = Instant::now(); - let (state_diff, stats) = self.continue_block(bouncer_cap)?; + let (state_diff, visited_segments, bouncer_weights, stats) = self.continue_block(bouncer_cap)?; if stats.n_added_to_block > 0 { tracing::info!( "🧮 Executed and added {} transaction(s) to the pending block at height {} - {:?}", @@ -417,7 +363,13 @@ impl BlockProductionTask { // Store pending block // todo, prefer using the block import pipeline? 
- self.backend.store_block(self.block.clone().into(), state_diff, self.declared_classes.clone())?; + self.backend.store_block( + self.block.clone().into(), + state_diff, + self.declared_classes.clone(), + Some(visited_segments), + Some(bouncer_weights), + )?; // do not forget to flush :) self.backend.flush().map_err(|err| BlockImportError::Internal(format!("DB flushing error: {err:#}").into()))?; @@ -432,7 +384,7 @@ impl BlockProductionTask { // Complete the block with full bouncer capacity. let start_time = Instant::now(); - let (mut new_state_diff, _n_executed) = + let (mut new_state_diff, visited_segments, _weights, _stats) = self.continue_block(self.backend.chain_config().bouncer_config.block_max_capacity)?; // SNOS requirement: For blocks >= 10, the hash of the block 10 blocks prior @@ -479,6 +431,7 @@ impl BlockProductionTask { self.backend.chain_config().chain_id.clone(), block_n, declared_classes, + visited_segments, ) .await?; // do not forget to flush :) @@ -566,7 +519,7 @@ impl BlockProductionTask { } #[cfg(test)] -mod test { +mod tests { use std::{collections::HashMap, sync::Arc}; use blockifier::{compiled_class_hash, nonce, state::cached_state::StateMaps, storage_key}; @@ -583,8 +536,10 @@ mod test { }; use starknet_types_core::felt::Felt; + use crate::finalize_execution_state::state_map_to_state_diff; + #[test] - fn state_map_to_state_diff() { + fn test_state_map_to_state_diff() { let backend = MadaraBackend::open_for_testing(Arc::new(ChainConfig::madara_test())); let mut nonces = HashMap::new(); @@ -686,7 +641,7 @@ mod test { replaced_classes, }; - let mut actual = super::state_map_to_state_diff(&backend, &Option::<_>::None, state_map).unwrap(); + let mut actual = state_map_to_state_diff(&backend, &Option::<_>::None, state_map).unwrap(); actual.storage_diffs.sort_by(|a, b| a.address.cmp(&b.address)); actual.storage_diffs.iter_mut().for_each(|s| s.storage_entries.sort_by(|a, b| a.key.cmp(&b.key))); diff --git a/crates/client/mempool/src/block_production_metrics.rs b/crates/client/block_production/src/metrics.rs similarity index 100% rename from crates/client/mempool/src/block_production_metrics.rs rename to crates/client/block_production/src/metrics.rs diff --git a/crates/client/db/Cargo.toml b/crates/client/db/Cargo.toml index 3d9d08658..f78e62c54 100644 --- a/crates/client/db/Cargo.toml +++ b/crates/client/db/Cargo.toml @@ -29,6 +29,7 @@ mp-transactions = { workspace = true } mp-utils = { workspace = true } # Starknet +blockifier = { workspace = true } bonsai-trie = { workspace = true } starknet-types-core = { workspace = true } starknet_api = { workspace = true } diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index 3945d804a..827634be1 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -2,10 +2,11 @@ use crate::db_block_id::{DbBlockId, DbBlockIdResolvable}; use crate::{Column, DatabaseExt, MadaraBackend, WriteBatchWithTransaction}; use crate::{MadaraStorageError, DB}; use anyhow::Context; +use blockifier::bouncer::BouncerWeights; use mp_block::header::{GasPrices, PendingHeader}; use mp_block::{ BlockId, BlockTag, MadaraBlock, MadaraBlockInfo, MadaraBlockInner, MadaraMaybePendingBlock, - MadaraMaybePendingBlockInfo, MadaraPendingBlock, MadaraPendingBlockInfo, + MadaraMaybePendingBlockInfo, MadaraPendingBlock, MadaraPendingBlockInfo, VisitedSegments, }; use mp_state_update::StateDiff; use rocksdb::WriteOptions; @@ -23,6 +24,8 @@ struct ChainInfo { const ROW_CHAIN_INFO: &[u8] = b"chain_info"; const 
ROW_PENDING_INFO: &[u8] = b"pending_info"; const ROW_PENDING_STATE_UPDATE: &[u8] = b"pending_state_update"; +const ROW_PENDING_SEGMENTS: &[u8] = b"pending_segments"; +const ROW_PENDING_BOUNCER_WEIGHTS: &[u8] = b"pending_bouncer_weights"; const ROW_PENDING_INNER: &[u8] = b"pending"; const ROW_SYNC_TIP: &[u8] = b"sync_tip"; const ROW_L1_LAST_CONFIRMED_BLOCK: &[u8] = b"l1_last"; @@ -193,6 +196,28 @@ impl MadaraBackend { Ok(res) } + #[tracing::instrument(skip(self), fields(module = "BlockDB"))] + pub fn get_pending_block_segments(&self) -> Result> { + let col = self.db.get_column(Column::BlockStorageMeta); + let Some(res) = self.db.get_cf(&col, ROW_PENDING_SEGMENTS)? else { + // See pending block quirk + return Ok(None); + }; + let res = Some(bincode::deserialize(&res)?); + Ok(res) + } + + #[tracing::instrument(skip(self), fields(module = "BlockDB"))] + pub fn get_pending_block_bouncer_weights(&self) -> Result> { + let col = self.db.get_column(Column::BlockStorageMeta); + let Some(res) = self.db.get_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS)? else { + // See pending block quirk + return Ok(None); + }; + let res = Some(bincode::deserialize(&res)?); + Ok(res) + } + #[tracing::instrument(skip(self), fields(module = "BlockDB"))] pub fn get_l1_last_confirmed_block(&self) -> Result> { let col = self.db.get_column(Column::BlockStorageMeta); @@ -204,12 +229,24 @@ impl MadaraBackend { // DB write #[tracing::instrument(skip(self), fields(module = "BlockDB"))] - pub(crate) fn block_db_store_pending(&self, block: &MadaraPendingBlock, state_update: &StateDiff) -> Result<()> { + pub(crate) fn block_db_store_pending( + &self, + block: &MadaraPendingBlock, + state_update: &StateDiff, + visited_segments: Option, + bouncer_weights: Option, + ) -> Result<()> { let mut tx = WriteBatchWithTransaction::default(); let col = self.db.get_column(Column::BlockStorageMeta); tx.put_cf(&col, ROW_PENDING_INFO, bincode::serialize(&block.info)?); tx.put_cf(&col, ROW_PENDING_INNER, bincode::serialize(&block.inner)?); tx.put_cf(&col, ROW_PENDING_STATE_UPDATE, bincode::serialize(&state_update)?); + if let Some(visited_segments) = visited_segments { + tx.put_cf(&col, ROW_PENDING_SEGMENTS, bincode::serialize(&visited_segments)?); + } + if let Some(bouncer_weights) = bouncer_weights { + tx.put_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS, bincode::serialize(&bouncer_weights)?); + } let mut writeopts = WriteOptions::new(); writeopts.disable_wal(true); self.db.write_opt(tx, &writeopts)?; @@ -223,6 +260,8 @@ impl MadaraBackend { tx.delete_cf(&col, ROW_PENDING_INFO); tx.delete_cf(&col, ROW_PENDING_INNER); tx.delete_cf(&col, ROW_PENDING_STATE_UPDATE); + tx.delete_cf(&col, ROW_PENDING_SEGMENTS); + tx.delete_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS); let mut writeopts = WriteOptions::new(); writeopts.disable_wal(true); self.db.write_opt(tx, &writeopts)?; diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index 5f5d27acf..61a92269f 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -8,7 +8,9 @@ use db_metrics::DbMetrics; use mp_chain_config::ChainConfig; use mp_utils::service::{MadaraService, Service}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; -use rocksdb::{BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, Env, FlushOptions, MultiThreaded}; +use rocksdb::{ + BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, Env, FlushOptions, MultiThreaded, WriteOptions, +}; use rocksdb_options::rocksdb_global_options; use snapshots::Snapshots; use starknet_types_core::hash::{Pedersen, 
Poseidon, StarkHash}; @@ -18,6 +20,7 @@ use std::{fmt, fs}; use tokio::sync::{mpsc, oneshot}; mod error; +mod rocksdb_options; mod rocksdb_snapshot; mod snapshots; @@ -29,7 +32,7 @@ pub mod db_block_id; pub mod db_metrics; pub mod devnet_db; pub mod l1_db; -mod rocksdb_options; +pub mod mempool_db; pub mod storage_updates; pub mod tests; @@ -72,7 +75,7 @@ fn spawn_backup_db_task( if restore_from_latest_backup { tracing::info!("⏳ Restoring latest backup..."); tracing::debug!("restore path is {db_path:?}"); - fs::create_dir_all(db_path).with_context(|| format!("creating directories {:?}", db_path))?; + fs::create_dir_all(db_path).with_context(|| format!("Creating parent directories {:?}", db_path))?; let opts = rocksdb::backup::RestoreOptions::default(); engine.restore_from_latest_backup(db_path, db_path, &opts).context("Restoring database")?; @@ -146,6 +149,8 @@ pub enum Column { /// Devnet: stores the private keys for the devnet predeployed contracts Devnet, + + MempoolTransactions, } impl fmt::Debug for Column { @@ -192,6 +197,7 @@ impl Column { PendingContractToNonces, PendingContractStorage, Devnet, + MempoolTransactions, ] }; pub const NUM_COLUMNS: usize = Self::ALL.len(); @@ -227,6 +233,7 @@ impl Column { PendingContractToNonces => "pending_contract_to_nonces", PendingContractStorage => "pending_contract_storage", Devnet => "devnet", + MempoolTransactions => "mempool_transactions", } } } @@ -245,6 +252,12 @@ impl DatabaseExt for DB { } } +fn make_write_opt_no_wal() -> WriteOptions { + let mut opts = WriteOptions::new(); + opts.disable_wal(true); + opts +} + #[derive(Debug)] pub struct TrieLogConfig { pub max_saved_trie_logs: usize, @@ -259,7 +272,6 @@ impl Default for TrieLogConfig { } /// Madara client database backend singleton. -#[derive(Debug)] pub struct MadaraBackend { backup_handle: Option>, db: Arc, @@ -268,10 +280,23 @@ pub struct MadaraBackend { snapshots: Arc, trie_log_config: TrieLogConfig, sender_block_info: tokio::sync::broadcast::Sender, + write_opt_no_wal: WriteOptions, #[cfg(feature = "testing")] _temp_dir: Option, } +impl fmt::Debug for MadaraBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MadaraBackend") + .field("backup_handle", &self.backup_handle) + .field("db", &self.db) + .field("chain_config", &self.chain_config) + .field("db_metrics", &self.db_metrics) + .field("sender_block_info", &self.sender_block_info) + .finish() + } +} + pub struct DatabaseService { handle: Arc, } @@ -357,6 +382,7 @@ impl MadaraBackend { snapshots, trie_log_config: Default::default(), sender_block_info: tokio::sync::broadcast::channel(100).0, + write_opt_no_wal: make_write_opt_no_wal(), _temp_dir: Some(temp_dir), }) } @@ -409,6 +435,7 @@ impl MadaraBackend { snapshots, trie_log_config, sender_block_info: tokio::sync::broadcast::channel(100).0, + write_opt_no_wal: make_write_opt_no_wal(), #[cfg(feature = "testing")] _temp_dir: None, }); diff --git a/crates/client/db/src/mempool_db.rs b/crates/client/db/src/mempool_db.rs new file mode 100644 index 000000000..721b0f068 --- /dev/null +++ b/crates/client/db/src/mempool_db.rs @@ -0,0 +1,73 @@ +use crate::DatabaseExt; +use crate::{Column, MadaraBackend, MadaraStorageError}; +use mp_class::ConvertedClass; +use rocksdb::IteratorMode; +use serde::{Deserialize, Serialize}; +use starknet_types_core::felt::Felt; + +type Result = std::result::Result; + +#[derive(Serialize, Deserialize)] +pub struct SavedTransaction { + pub tx: mp_transactions::Transaction, + pub paid_fee_on_l1: Option, + pub contract_address: 
Option, + pub only_query: bool, + pub arrived_at: u128, +} + +#[derive(Serialize)] +struct TransactionWithConvertedClassRef<'a> { + tx: &'a SavedTransaction, + converted_class: &'a Option, +} +#[derive(Serialize, Deserialize)] +struct TransactionWithConvertedClass { + tx: SavedTransaction, + converted_class: Option, +} + +impl MadaraBackend { + #[tracing::instrument(skip(self), fields(module = "MempoolDB"))] + pub fn get_mempool_transactions( + &self, + ) -> impl Iterator)>> + '_ { + let col = self.db.get_column(Column::MempoolTransactions); + self.db.iterator_cf(&col, IteratorMode::Start).map(|kv| { + let (k, v) = kv?; + let hash: Felt = bincode::deserialize(&k)?; + let tx: TransactionWithConvertedClass = bincode::deserialize(&v)?; + + Result::<_>::Ok((hash, tx.tx, tx.converted_class)) + }) + } + + #[tracing::instrument(skip(self), fields(module = "MempoolDB"))] + pub fn remove_mempool_transaction(&self, tx_hash: &Felt) -> Result<()> { + // Note: We do not use the WAL here, as this delete is flushed together with the block save. + // This ensures that saving the block and removing the tx from the saved mempool happen + // atomically. + + let col = self.db.get_column(Column::MempoolTransactions); + self.db.delete_cf_opt(&col, bincode::serialize(tx_hash)?, &self.write_opt_no_wal)?; + tracing::debug!("remove_mempool_tx {:?}", tx_hash); + Ok(()) + } + + #[tracing::instrument(skip(self, tx), fields(module = "MempoolDB"))] + pub fn save_mempool_transaction( + &self, + tx: &SavedTransaction, + tx_hash: Felt, + converted_class: &Option, + ) -> Result<()> { + // Note: The WAL is used here, because we want the transaction to be persisted even if the + // node crashes before the next flush. + + let col = self.db.get_column(Column::MempoolTransactions); + let tx_with_class = TransactionWithConvertedClassRef { tx, converted_class }; + self.db.put_cf(&col, bincode::serialize(&tx_hash)?, bincode::serialize(&tx_with_class)?)?; + tracing::debug!("save_mempool_tx {:?}", tx_hash); + Ok(()) + } +} diff --git a/crates/client/db/src/storage_updates.rs b/crates/client/db/src/storage_updates.rs index cf2a01454..b5f5ae59e 100644 --- a/crates/client/db/src/storage_updates.rs +++ b/crates/client/db/src/storage_updates.rs @@ -1,6 +1,8 @@ use crate::db_block_id::DbBlockId; use crate::MadaraBackend; use crate::MadaraStorageError; +use blockifier::bouncer::BouncerWeights; +use mp_block::VisitedSegments; use mp_block::{MadaraBlock, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo, MadaraPendingBlock}; use mp_class::ConvertedClass; use mp_state_update::{ @@ -16,6 +18,8 @@ impl MadaraBackend { block: MadaraMaybePendingBlock, state_diff: StateDiff, converted_classes: Vec, + visited_segments: Option, + bouncer_weights: Option, ) -> Result<(), MadaraStorageError> { let block_n = block.info.block_n(); let state_diff_cpy = state_diff.clone(); @@ -24,9 +28,12 @@ impl MadaraBackend { self.clear_pending_block()?; let task_block_db = || match block.info { - MadaraMaybePendingBlockInfo::Pending(info) => { - self.block_db_store_pending(&MadaraPendingBlock { info, inner: block.inner }, &state_diff_cpy) - } + MadaraMaybePendingBlockInfo::Pending(info) => self.block_db_store_pending( + &MadaraPendingBlock { info, inner: block.inner }, + &state_diff_cpy, + visited_segments, + bouncer_weights, + ), MadaraMaybePendingBlockInfo::NotPending(info) => { self.block_db_store_block(&MadaraBlock { info, inner: block.inner }, &state_diff_cpy) } diff --git a/crates/client/db/src/tests/test_block.rs index
a9e043c06..2d7ed0c88 100644 --- a/crates/client/db/src/tests/test_block.rs +++ b/crates/client/db/src/tests/test_block.rs @@ -24,8 +24,8 @@ mod block_tests { let block_hash = block.info.block_hash().unwrap(); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); - backend.store_block(pending_block_one(), pending_state_diff_one(), vec![]).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None, None).unwrap(); assert_eq!(backend.resolve_block_id(&BlockId::Hash(block_hash)).unwrap().unwrap(), DbBlockId::Number(0)); assert_eq!(backend.resolve_block_id(&BlockId::Number(0)).unwrap().unwrap(), DbBlockId::Number(0)); @@ -52,7 +52,7 @@ mod block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); assert_eq!(backend.get_block_hash(&BLOCK_ID_0).unwrap().unwrap(), block.info.block_hash().unwrap()); assert_eq!(BLOCK_ID_0.resolve_db_block_id(backend).unwrap().unwrap(), BLOCK_ID_0); @@ -75,7 +75,7 @@ mod block_tests { let block = pending_block_one(); let state_diff = pending_state_diff_one(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), block.info); @@ -91,8 +91,10 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); - backend.store_block(pending_block_one(), pending_state_diff_one(), vec![]).unwrap(); + backend + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) + .unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None, None).unwrap(); backend.clear_pending_block().unwrap(); assert!(backend.get_block(&BLOCK_ID_PENDING).unwrap().unwrap().inner.transactions.is_empty()); @@ -102,11 +104,11 @@ mod block_tests { "fake pending block parent hash must match with latest block in db" ); - backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![]).unwrap(); + backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![], None, None).unwrap(); let block_pending = pending_block_two(); let state_diff = pending_state_diff_two(); - backend.store_block(block_pending.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block_pending.clone(), state_diff.clone(), vec![], None, None).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), block_pending.info); @@ -120,10 +122,12 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); + backend + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) + .unwrap(); let latest_block = finalized_block_one(); - backend.store_block(latest_block.clone(), finalized_state_diff_one(), 
vec![]).unwrap(); + backend.store_block(latest_block.clone(), finalized_state_diff_one(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1); } @@ -148,7 +152,7 @@ mod block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); let tx_hash_1 = block.info.tx_hashes()[1]; assert_eq!(backend.find_tx_hash_block_info(&tx_hash_1).unwrap().unwrap(), (block.info.clone(), TxIndex(1))); @@ -160,10 +164,12 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); + backend + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) + .unwrap(); let block_pending = pending_block_one(); - backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![]).unwrap(); + backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![], None, None).unwrap(); let tx_hash_1 = block_pending.info.tx_hashes()[1]; assert_eq!( diff --git a/crates/client/devnet/Cargo.toml b/crates/client/devnet/Cargo.toml index de637dad9..038dfcd50 100644 --- a/crates/client/devnet/Cargo.toml +++ b/crates/client/devnet/Cargo.toml @@ -19,7 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"] rstest = { workspace = true } mc-db = { workspace = true, features = ["testing"] } mc-mempool = { workspace = true, features = ["testing"] } -tokio = { workspace = true, features = ["rt-multi-thread"] } +mc-block-production = { workspace = true, features = ["testing"] } +tokio = { workspace = true, features = ["rt-multi-thread", "test-util"] } proptest.workspace = true proptest-derive.workspace = true blockifier = { workspace = true, features = ["testing"] } diff --git a/crates/client/devnet/src/lib.rs b/crates/client/devnet/src/lib.rs index 6dc6eb359..f8e0a7e76 100644 --- a/crates/client/devnet/src/lib.rs +++ b/crates/client/devnet/src/lib.rs @@ -187,19 +187,19 @@ mod tests { use super::*; use assert_matches::assert_matches; use mc_block_import::{BlockImporter, BlockValidationContext}; + use mc_block_production::metrics::BlockProductionMetrics; + use mc_block_production::BlockProductionTask; use mc_db::MadaraBackend; - use mc_mempool::block_production::BlockProductionTask; - use mc_mempool::block_production_metrics::BlockProductionMetrics; - use mc_mempool::MempoolProvider; use mc_mempool::{transaction_hash, L1DataProvider, Mempool, MockL1DataProvider}; + use mc_mempool::{MempoolLimits, MempoolProvider}; use mp_block::header::L1DataAvailabilityMode; use mp_block::{BlockId, BlockTag}; use mp_class::{ClassInfo, FlattenedSierraClass}; use mp_receipt::{Event, ExecutionResult, FeePayment, InvokeTransactionReceipt, PriceUnit, TransactionReceipt}; - use mp_transactions::broadcasted_to_blockifier; use mp_transactions::compute_hash::calculate_contract_address; + use mp_transactions::BroadcastedTransactionExt; use rstest::{fixture, rstest}; use starknet_core::types::contract::SierraClass; use starknet_types_rpc::{ @@ -208,6 +208,7 @@ mod tests { InvokeTxnV3, ResourceBounds, ResourceBoundsMapping, }; use std::sync::Arc; + use std::time::Duration; struct DevnetForTesting { backend: Arc, @@ -221,13 +222,13 @@ mod tests { &self, mut tx: BroadcastedInvokeTxn, contract: &DevnetPredeployedContract, - ) -> 
AddInvokeTransactionResult<Felt> { - let (blockifier_tx, _classes) = broadcasted_to_blockifier( - BroadcastedTxn::Invoke(tx.clone()), - self.backend.chain_config().chain_id.to_felt(), - self.backend.chain_config().latest_protocol_version, - ) - .unwrap(); + ) -> Result<AddInvokeTransactionResult<Felt>, mc_mempool::Error> { + let (blockifier_tx, _classes) = BroadcastedTxn::Invoke(tx.clone()) + .into_blockifier( + self.backend.chain_config().chain_id.to_felt(), + self.backend.chain_config().latest_protocol_version, + ) + .unwrap(); let signature = contract.secret.sign(&transaction_hash(&blockifier_tx)).unwrap(); let tx_signature = match &mut tx { @@ -240,20 +241,20 @@ tracing::debug!("tx: {:?}", tx); - self.mempool.accept_invoke_tx(tx).unwrap() + self.mempool.accept_invoke_tx(tx) } pub fn sign_and_add_declare_tx( &self, mut tx: BroadcastedDeclareTxn<Felt>, contract: &DevnetPredeployedContract, - ) -> ClassAndTxnHash<Felt> { - let (blockifier_tx, _classes) = broadcasted_to_blockifier( - BroadcastedTxn::Declare(tx.clone()), - self.backend.chain_config().chain_id.to_felt(), - self.backend.chain_config().latest_protocol_version, - ) - .unwrap(); + ) -> Result<ClassAndTxnHash<Felt>, mc_mempool::Error> { + let (blockifier_tx, _classes) = BroadcastedTxn::Declare(tx.clone()) + .into_blockifier( + self.backend.chain_config().chain_id.to_felt(), + self.backend.chain_config().latest_protocol_version, + ) + .unwrap(); let signature = contract.secret.sign(&transaction_hash(&blockifier_tx)).unwrap(); let tx_signature = match &mut tx { @@ -264,20 +265,20 @@ }; *tx_signature = vec![signature.r, signature.s]; - self.mempool.accept_declare_tx(tx).unwrap() + self.mempool.accept_declare_tx(tx) } pub fn sign_and_add_deploy_account_tx( &self, mut tx: BroadcastedDeployAccountTxn<Felt>, contract: &DevnetPredeployedContract, - ) -> ContractAndTxnHash<Felt> { - let (blockifier_tx, _classes) = broadcasted_to_blockifier( - BroadcastedTxn::DeployAccount(tx.clone()), - self.backend.chain_config().chain_id.to_felt(), - self.backend.chain_config().latest_protocol_version, - ) - .unwrap(); + ) -> Result<ContractAndTxnHash<Felt>, mc_mempool::Error> { + let (blockifier_tx, _classes) = BroadcastedTxn::DeployAccount(tx.clone()) + .into_blockifier( + self.backend.chain_config().chain_id.to_felt(), + self.backend.chain_config().latest_protocol_version, + ) + .unwrap(); let signature = contract.secret.sign(&transaction_hash(&blockifier_tx)).unwrap(); let tx_signature = match &mut tx { @@ -287,7 +288,7 @@ }; *tx_signature = vec![signature.r, signature.s]; - self.mempool.accept_deploy_account_tx(tx).unwrap() + self.mempool.accept_deploy_account_tx(tx) } /// (STRK in FRI, ETH in WEI) @@ -298,6 +299,10 @@ #[fixture] fn chain() -> DevnetForTesting { + chain_with_mempool_limits(MempoolLimits::for_testing()) + } + + fn chain_with_mempool_limits(mempool_limits: MempoolLimits) -> DevnetForTesting { let _ = tracing_subscriber::fmt().with_test_writer().try_init(); let mut g = ChainGenesisDescription::base_config().unwrap(); @@ -308,7 +313,7 @@ let backend = MadaraBackend::open_for_testing(Arc::clone(&chain_config)); let importer = Arc::new(BlockImporter::new(Arc::clone(&backend), None).unwrap()); - println!("{:?}", block.state_diff); + tracing::debug!("{:?}", block.state_diff); tokio::runtime::Runtime::new() .unwrap() .block_on( @@ -330,7 +335,7 @@ strk_l1_data_gas_price: 128, }); let l1_data_provider = Arc::new(l1_data_provider) as Arc<dyn L1DataProvider>; - let mempool = Arc::new(Mempool::new(Arc::clone(&backend), Arc::clone(&l1_data_provider))); + let mempool =
Arc::new(Mempool::new(Arc::clone(&backend), Arc::clone(&l1_data_provider), mempool_limits)); let metrics = BlockProductionMetrics::register(); let block_production = BlockProductionTask::new( @@ -348,7 +353,7 @@ mod tests { #[rstest] #[case(m_cairo_test_contracts::TEST_CONTRACT_SIERRA)] fn test_erc_20_declare(mut chain: DevnetForTesting, #[case] contract: &[u8]) { - println!("{}", chain.contracts); + tracing::info!("{}", chain.contracts); let sender_address = &chain.contracts.0[0]; @@ -377,7 +382,7 @@ mod tests { version: starknet_types_rpc::Version::X3, }); - let res = chain.sign_and_add_declare_tx(declare_txn, sender_address); + let res = chain.sign_and_add_declare_tx(declare_txn, sender_address).unwrap(); let calculated_class_hash = sierra_class.class_hash().unwrap(); @@ -423,31 +428,33 @@ mod tests { // Transferring the funds from pre deployed account into the calculated address let contract_0 = &chain.contracts.0[0]; - let transfer_txn = chain.sign_and_add_invoke_tx( - BroadcastedInvokeTxn::V3(InvokeTxnV3 { - sender_address: contract_0.address, - calldata: Multicall::default() - .with(Call { - to: ERC20_STRK_CONTRACT_ADDRESS, - selector: Selector::from("transfer"), - calldata: vec![calculated_address, (9_999u128 * STRK_FRI_DECIMALS).into(), Felt::ZERO], - }) - .flatten() - .collect(), - signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. - nonce: Felt::ZERO, - resource_bounds: ResourceBoundsMapping { - l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - }, - tip: 0, - paymaster_data: vec![], - account_deployment_data: vec![], - nonce_data_availability_mode: DaMode::L1, - fee_data_availability_mode: DaMode::L1, - }), - contract_0, - ); + let transfer_txn = chain + .sign_and_add_invoke_tx( + BroadcastedInvokeTxn::V3(InvokeTxnV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![calculated_address, (9_999u128 * STRK_FRI_DECIMALS).into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. 
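+ // `transfer` takes a u256 amount encoded as two felts (low, high): the amount is the low felt and the trailing Felt::ZERO above is the high 128 bits.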
+ nonce: Felt::ZERO, + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: DaMode::L1, + fee_data_availability_mode: DaMode::L1, + }), + contract_0, + ) + .unwrap(); tracing::debug!("tx hash: {:#x}", transfer_txn.transaction_hash); chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); @@ -480,7 +487,7 @@ mod tests { fee_data_availability_mode: DaMode::L1, }); - let res = chain.sign_and_add_deploy_account_tx(deploy_account_txn, &account); + let res = chain.sign_and_add_deploy_account_tx(deploy_account_txn, &account).unwrap(); chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); chain.block_production.on_pending_time_tick().unwrap(); @@ -503,7 +510,7 @@ mod tests { #[case(9_999u128 * STRK_FRI_DECIMALS, false)] #[case(10_001u128 * STRK_FRI_DECIMALS, true)] fn test_basic_transfer(mut chain: DevnetForTesting, #[case] transfer_amount: u128, #[case] expect_reverted: bool) { - println!("{}", chain.contracts); + tracing::info!("{}", chain.contracts); let sequencer_address = chain.backend.chain_config().sequencer_address.to_felt(); let contract_0 = &chain.contracts.0[0]; @@ -513,31 +520,33 @@ mod tests { assert_eq!(chain.get_bal_strk_eth(contract_0.address), (10_000 * STRK_FRI_DECIMALS, 10_000 * ETH_WEI_DECIMALS)); assert_eq!(chain.get_bal_strk_eth(contract_1.address), (10_000 * STRK_FRI_DECIMALS, 10_000 * ETH_WEI_DECIMALS)); - let result = chain.sign_and_add_invoke_tx( - BroadcastedInvokeTxn::V3(InvokeTxnV3 { - sender_address: contract_0.address, - calldata: Multicall::default() - .with(Call { - to: ERC20_STRK_CONTRACT_ADDRESS, - selector: Selector::from("transfer"), - calldata: vec![contract_1.address, transfer_amount.into(), Felt::ZERO], - }) - .flatten() - .collect(), - signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. - nonce: Felt::ZERO, - resource_bounds: ResourceBoundsMapping { - l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - }, - tip: 0, - paymaster_data: vec![], - account_deployment_data: vec![], - nonce_data_availability_mode: DaMode::L1, - fee_data_availability_mode: DaMode::L1, - }), - contract_0, - ); + let result = chain + .sign_and_add_invoke_tx( + BroadcastedInvokeTxn::V3(InvokeTxnV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, transfer_amount.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. 
+ nonce: Felt::ZERO, + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: DaMode::L1, + fee_data_availability_mode: DaMode::L1, + }), + contract_0, + ) + .unwrap(); tracing::info!("tx hash: {:#x}", result.transaction_hash); @@ -628,4 +637,130 @@ mod tests { } } } + + #[rstest] + fn test_mempool_tx_limit() { + let chain = chain_with_mempool_limits(MempoolLimits { + max_age: Duration::from_millis(1000000), + max_declare_transactions: 2, + max_transactions: 5, + }); + tracing::info!("{}", chain.contracts); + + let contract_0 = &chain.contracts.0[0]; + let contract_1 = &chain.contracts.0[1]; + + for nonce in 0..5 { + chain + .sign_and_add_invoke_tx( + BroadcastedInvokeTxn::V3(InvokeTxnV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, 15.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. + nonce: nonce.into(), + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: DaMode::L1, + fee_data_availability_mode: DaMode::L1, + }), + contract_0, + ) + .unwrap(); + } + + let result = chain.sign_and_add_invoke_tx( + BroadcastedInvokeTxn::V3(InvokeTxnV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, 15.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. + nonce: 5.into(), + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: DaMode::L1, + fee_data_availability_mode: DaMode::L1, + }), + contract_0, + ); + + assert_matches!( + result, + Err(mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::Limit( + mc_mempool::MempoolLimitReached::MaxTransactions { max: 5 } + ))) + ) + } + + #[rstest] + fn test_mempool_age_limit() { + let max_age = Duration::from_millis(1000); + let mut chain = + chain_with_mempool_limits(MempoolLimits { max_age, max_declare_transactions: 2, max_transactions: 5 }); + tracing::info!("{}", chain.contracts); + + let contract_0 = &chain.contracts.0[0]; + let contract_1 = &chain.contracts.0[1]; + + chain + .sign_and_add_invoke_tx( + BroadcastedInvokeTxn::V3(InvokeTxnV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, 15.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. 
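+ // A single transaction is enough for this test: it only needs to sit in the mempool long enough to exceed max_age before the next tick.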
+ nonce: 0.into(), + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: DaMode::L1, + fee_data_availability_mode: DaMode::L1, + }), + contract_0, + ) + .unwrap(); + + std::thread::sleep(max_age); // max age reached + chain.block_production.set_current_pending_tick(1); + chain.block_production.on_pending_time_tick().unwrap(); + + let block = chain.backend.get_block(&BlockId::Tag(BlockTag::Pending)).unwrap().unwrap(); + + // no transactions :) + assert_eq!(block.inner.transactions, vec![]); + assert_eq!(block.inner.receipts, vec![]); + assert!(chain.mempool.is_empty()); + } } diff --git a/crates/client/eth/src/client.rs b/crates/client/eth/src/client.rs index 8b40c9726..21c20a5b1 100644 --- a/crates/client/eth/src/client.rs +++ b/crates/client/eth/src/client.rs @@ -210,12 +210,13 @@ pub mod eth_client_getter_test { AnvilPortNum(guard.next.next().expect("no more port to use")) } - fn create_anvil_instance() -> AnvilInstance { + pub fn create_anvil_instance() -> AnvilInstance { let port = get_port(); let anvil = Anvil::new() .fork(FORK_URL.clone()) .fork_block_number(L1_BLOCK_NUMBER) .port(port.0) + .timeout(20_000) .try_spawn() .expect("failed to spawn anvil instance"); println!("Anvil started and running at `{}`", anvil.endpoint()); diff --git a/crates/client/eth/src/l1_gas_price.rs b/crates/client/eth/src/l1_gas_price.rs index 33a7f7a9d..e6ca2b321 100644 --- a/crates/client/eth/src/l1_gas_price.rs +++ b/crates/client/eth/src/l1_gas_price.rs @@ -99,30 +99,18 @@ async fn update_l1_block_metrics(eth_client: &EthereumClient, l1_gas_provider: G #[cfg(test)] mod eth_client_gas_price_worker_test { use super::*; - use crate::client::eth_client_getter_test::create_ethereum_client; - use alloy::node_bindings::Anvil; + use crate::client::eth_client_getter_test::{create_anvil_instance, create_ethereum_client}; use httpmock::{MockServer, Regex}; use mc_mempool::GasPriceProvider; use serial_test::serial; use std::time::SystemTime; use tokio::task::JoinHandle; use tokio::time::{timeout, Duration}; - const ANOTHER_ANVIL_PORT: u16 = 8546; - const L1_BLOCK_NUMBER: u64 = 20395662; - - lazy_static::lazy_static! 
{ - static ref FORK_URL: String = std::env::var("ETH_FORK_URL").expect("ETH_FORK_URL not set"); - } #[serial] #[tokio::test] async fn gas_price_worker_when_infinite_loop_true_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); @@ -166,12 +154,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn gas_price_worker_when_infinite_loop_false_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); @@ -190,12 +173,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn gas_price_worker_when_gas_price_fix_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); l1_gas_provider.update_eth_l1_gas_price(20); @@ -216,12 +194,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn gas_price_worker_when_data_gas_price_fix_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); l1_gas_provider.update_eth_l1_data_gas_price(20); @@ -306,12 +279,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn update_gas_price_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); diff --git a/crates/client/eth/src/l1_messaging.rs b/crates/client/eth/src/l1_messaging.rs index e85c8c0f5..78e92c8db 100644 --- a/crates/client/eth/src/l1_messaging.rs +++ b/crates/client/eth/src/l1_messaging.rs @@ -5,15 +5,13 @@ use alloy::eips::BlockNumberOrTag; use alloy::primitives::{keccak256, FixedBytes, U256}; use alloy::sol_types::SolValue; use anyhow::Context; -use blockifier::transaction::transaction_execution::Transaction as BlockifierTransation; use futures::StreamExt; use mc_db::{l1_db::LastSyncedEventBlock, MadaraBackend}; use mc_mempool::{Mempool, MempoolProvider}; use mp_utils::channel_wait_or_graceful_shutdown; use mp_utils::service::ServiceContext; use starknet_api::core::{ChainId, ContractAddress, EntryPointSelector, Nonce}; -use starknet_api::transaction::{Calldata, Fee, L1HandlerTransaction, Transaction, TransactionVersion}; -use starknet_api::transaction_hash::get_transaction_hash; +use starknet_api::transaction::{Calldata, L1HandlerTransaction, TransactionVersion}; use 
starknet_types_core::felt::Felt; use std::sync::Arc; @@ -135,11 +133,12 @@ async fn process_l1_message( event: &LogMessageToL2, l1_block_number: &Option<u64>, event_index: &Option<u64>, - chain_id: &ChainId, + _chain_id: &ChainId, mempool: Arc<Mempool>, ) -> anyhow::Result<Option<Felt>> { let transaction = parse_handle_l1_message_transaction(event)?; let tx_nonce = transaction.nonce; + let fees: u128 = event.fee.try_into()?; // Ensure that L1 message has not been executed match backend.has_l1_messaging_nonce(tx_nonce) { @@ -156,16 +155,7 @@ } }; - let tx_hash = get_transaction_hash(&Transaction::L1Handler(transaction.clone()), chain_id, &transaction.version)?; - let blockifier_transaction = BlockifierTransation::from_api( - Transaction::L1Handler(transaction), - tx_hash, - None, - Some(Fee(event.fee.try_into()?)), - None, - false, - )?; - let res = mempool.accept_l1_handler_tx(blockifier_transaction)?; + let res = mempool.accept_l1_handler_tx(transaction.into(), fees)?; // TODO: remove unwraps // Ques: shall it panic if no block number or event_index? @@ -244,7 +234,7 @@ mod l1_messaging_tests { transports::http::{Client, Http}, }; use mc_db::DatabaseService; - use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool}; + use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; use mp_chain_config::ChainConfig; use mp_utils::service::ServiceContext; use rstest::*; @@ -367,7 +357,11 @@ let l1_gas_setter = GasPriceProvider::new(); let l1_data_provider: Arc<dyn L1DataProvider> = Arc::new(l1_gas_setter.clone()); - let mempool = Arc::new(Mempool::new(Arc::clone(db.backend()), Arc::clone(&l1_data_provider))); + let mempool = Arc::new(Mempool::new( + Arc::clone(db.backend()), + Arc::clone(&l1_data_provider), + MempoolLimits::for_testing(), + )); // Set up metrics service let l1_block_metrics = L1BlockMetrics::register().unwrap(); diff --git a/crates/client/exec/Cargo.toml b/crates/client/exec/Cargo.toml index 15e22f290..636f05308 100644 --- a/crates/client/exec/Cargo.toml +++ b/crates/client/exec/Cargo.toml @@ -22,6 +22,8 @@ mp-block = { workspace = true } mp-chain-config = { workspace = true } mp-class = { workspace = true } mp-convert = { workspace = true } +mp-receipt = { workspace = true } +mp-transactions = { workspace = true } # Starknet blockifier = { workspace = true } diff --git a/crates/client/exec/src/lib.rs b/crates/client/exec/src/lib.rs index 2bc1880b7..05b4ba805 100644 --- a/crates/client/exec/src/lib.rs +++ b/crates/client/exec/src/lib.rs @@ -16,6 +16,7 @@ mod call; pub mod execution; mod fee; mod trace; +pub mod transaction; pub use block_context::ExecutionContext; pub use blockifier_state_adapter::BlockifierStateAdapter; diff --git a/crates/client/exec/src/transaction.rs b/crates/client/exec/src/transaction.rs new file mode 100644 index 000000000..0f3522060 --- /dev/null +++ b/crates/client/exec/src/transaction.rs @@ -0,0 +1,78 @@ +use std::{borrow::Cow, sync::Arc}; + +use blockifier::execution::{contract_class::ClassInfo, errors::ContractClassError}; +use blockifier::transaction::transaction_execution as btx; +use mc_db::{MadaraBackend, MadaraStorageError}; +use mp_block::BlockId; +use mp_class::compile::ClassCompilationError; +use mp_convert::ToFelt; +use starknet_api::transaction::{Transaction, TransactionHash}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Class not found")] + ClassNotFound, + #[error(transparent)] + Storage(#[from] MadaraStorageError), + #[error("Class compilation error: {0:#}")] + ClassCompilationError(#[from]
ClassCompilationError), + #[error("Contract class error: {0:#}")] + ContractClassError(#[from] ContractClassError), + #[error("{0}")] + Internal(Cow<'static, str>), +} + +/// Convert a starknet-api Transaction to a blockifier Transaction +/// +/// **note:** this function does not support deploy transactions +/// because they are not supported by blockifier +pub fn to_blockifier_transactions( + backend: Arc<MadaraBackend>, + block_id: BlockId, + transaction: mp_transactions::Transaction, + tx_hash: &TransactionHash, +) -> Result<btx::Transaction, Error> { + let transaction: Transaction = transaction + .try_into() + .map_err(|err| Error::Internal(format!("Converting to starknet api transaction {:#}", err).into()))?; + + let paid_fee_on_l1 = match transaction { + Transaction::L1Handler(_) => Some(starknet_api::transaction::Fee(1_000_000_000_000)), + _ => None, + }; + + let class_info = match &transaction { + Transaction::Declare(declare_tx) => { + let class_hash = declare_tx.class_hash(); + let class_info = backend.get_class_info(&block_id, &class_hash.to_felt())?.ok_or(Error::ClassNotFound)?; + + match class_info { + mp_class::ClassInfo::Sierra(info) => { + let compiled_class = + backend.get_sierra_compiled(&block_id, &info.compiled_class_hash)?.ok_or_else(|| { + Error::Internal( + format!("Inconsistent state: compiled sierra class from class_hash '{class_hash}' not found") + .into(), + ) + })?; + + let blockifier_class = compiled_class.to_blockifier_class()?; + Some(ClassInfo::new( + &blockifier_class, + info.contract_class.program_length(), + info.contract_class.abi_length(), + )?) + } + mp_class::ClassInfo::Legacy(info) => { + let blockifier_class = info.contract_class.to_blockifier_class()?; + Some(ClassInfo::new(&blockifier_class, 0, 0)?) + } + } + } + _ => None, + }; + + btx::Transaction::from_api(transaction.clone(), *tx_hash, class_info, paid_fee_on_l1, None, false).map_err(|err| { + Error::Internal(format!("Failed to convert transaction to blockifier transaction {:#}", err).into()) + }) +} diff --git a/crates/client/gateway/server/src/error.rs b/crates/client/gateway/server/src/error.rs index 77e09eed9..20e24fcc1 100644 --- a/crates/client/gateway/server/src/error.rs +++ b/crates/client/gateway/server/src/error.rs @@ -89,7 +89,7 @@ impl From<StarknetRpcApiError> for GatewayError { "Insufficient account balance".to_string(), )), StarknetRpcApiError::ValidationFailure { error } => { - GatewayError::StarknetError(StarknetError::new(StarknetErrorCode::ValidateFailure, error)) + GatewayError::StarknetError(StarknetError::new(StarknetErrorCode::ValidateFailure, error.into())) } StarknetRpcApiError::CompilationFailed => GatewayError::StarknetError(StarknetError::new( StarknetErrorCode::CompilationFailed, diff --git a/crates/client/mempool/Cargo.toml b/crates/client/mempool/Cargo.toml index 01b6ca475..9ce40fc20 100644 --- a/crates/client/mempool/Cargo.toml +++ b/crates/client/mempool/Cargo.toml @@ -22,6 +22,8 @@ tokio = { workspace = true, features = ["rt-multi-thread"] } proptest.workspace = true proptest-derive.workspace = true bitvec.workspace = true +tracing = { workspace = true, features = ["log"] } +tracing-test.workspace = true blockifier = { workspace = true, features = ["testing"] } mockall.workspace = true assert_matches.workspace = true diff --git a/crates/client/mempool/src/inner.rs b/crates/client/mempool/src/inner.rs deleted file mode 100644 index 837263ec2..000000000 --- a/crates/client/mempool/src/inner.rs +++ /dev/null @@ -1,678 +0,0 @@ -//! The inner mempool does not perform validation, and is expected to be stored into a RwLock or Mutex. -//!
This is the chokepoint for all insertions and popping, as such, we want to make it as fast as possible. -//! Insertion and popping should be O(log n). -//! We also really don't want to poison the lock by panicking. -//! -//! TODO: mempool size limits -//! TODO(perf): should we box the MempoolTransaction? - -use crate::{clone_transaction, contract_addr, nonce, tx_hash}; -use blockifier::transaction::account_transaction::AccountTransaction; -use blockifier::transaction::transaction_execution::Transaction; -use core::fmt; -use mc_exec::execution::TxInfo; -use mp_class::ConvertedClass; -use mp_convert::FeltHexDisplay; -use starknet_api::{ - core::{ContractAddress, Nonce}, - transaction::TransactionHash, -}; -use std::{ - cmp, - collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap}, - iter, - time::SystemTime, -}; - -pub type ArrivedAtTimestamp = SystemTime; - -pub struct MempoolTransaction { - pub tx: Transaction, - pub arrived_at: ArrivedAtTimestamp, - pub converted_class: Option, -} - -impl fmt::Debug for MempoolTransaction { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MempoolTransaction") - .field("tx_hash", &self.tx_hash().hex_display()) - .field("nonce", &self.nonce().hex_display()) - .field("contract_address", &self.contract_address().hex_display()) - .field("tx_type", &self.tx.tx_type()) - .field("arrived_at", &self.arrived_at) - .finish() - } -} - -impl Clone for MempoolTransaction { - fn clone(&self) -> Self { - Self { - tx: clone_transaction(&self.tx), - arrived_at: self.arrived_at, - converted_class: self.converted_class.clone(), - } - } -} - -impl MempoolTransaction { - pub fn nonce(&self) -> Nonce { - nonce(&self.tx) - } - pub fn contract_address(&self) -> ContractAddress { - contract_addr(&self.tx) - } - pub fn tx_hash(&self) -> TransactionHash { - tx_hash(&self.tx) - } -} - -#[derive(Debug)] -struct OrderMempoolTransactionByNonce(MempoolTransaction); - -impl PartialEq for OrderMempoolTransactionByNonce { - fn eq(&self, other: &Self) -> bool { - self.cmp(other).is_eq() - } -} -impl Eq for OrderMempoolTransactionByNonce {} -impl Ord for OrderMempoolTransactionByNonce { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.0.nonce().cmp(&other.0.nonce()) - } -} -impl PartialOrd for OrderMempoolTransactionByNonce { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// Invariants: -/// - front_nonce, front_arrived_at and front_tx_hash must match the front transaction timestamp. -/// - No nonce chain should ever be empty in the mempool. -#[derive(Debug)] -pub struct NonceChain { - /// Use a BTreeMap to so that we can use the entry api. 
- transactions: BTreeMap, - front_arrived_at: ArrivedAtTimestamp, - front_nonce: Nonce, - front_tx_hash: TransactionHash, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum InsertedPosition { - Front { former_head_arrived_at: ArrivedAtTimestamp }, - Other, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum ReplacedState { - Replaced, - NotReplaced, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum NonceChainNewState { - Empty, - NotEmpty, -} - -impl NonceChain { - pub fn new_with_first_tx(tx: MempoolTransaction) -> Self { - Self { - front_arrived_at: tx.arrived_at, - front_tx_hash: tx.tx_hash(), - front_nonce: tx.nonce(), - transactions: iter::once((OrderMempoolTransactionByNonce(tx), ())).collect(), - } - } - - #[cfg(test)] - pub fn check_invariants(&self) { - assert!(!self.transactions.is_empty()); - let (front, _) = self.transactions.first_key_value().unwrap(); - assert_eq!(front.0.tx_hash(), self.front_tx_hash); - assert_eq!(front.0.nonce(), self.front_nonce); - assert_eq!(front.0.arrived_at, self.front_arrived_at); - } - - /// Returns where in the chain it was inserted. - /// When `force` is `true`, this function should never return any error. - pub fn insert( - &mut self, - mempool_tx: MempoolTransaction, - force: bool, - ) -> Result<(InsertedPosition, ReplacedState), TxInsersionError> { - let mempool_tx_arrived_at = mempool_tx.arrived_at; - let mempool_tx_nonce = mempool_tx.nonce(); - let mempool_tx_hash = mempool_tx.tx_hash(); - - let replaced = if force { - if self.transactions.insert(OrderMempoolTransactionByNonce(mempool_tx), ()).is_some() { - ReplacedState::Replaced - } else { - ReplacedState::NotReplaced - } - } else { - match self.transactions.entry(OrderMempoolTransactionByNonce(mempool_tx)) { - btree_map::Entry::Occupied(entry) => { - // duplicate nonce, either it's because the hash is duplicated or nonce conflict with another tx. 
- if entry.key().0.tx_hash() == mempool_tx_hash { - return Err(TxInsersionError::DuplicateTxn); - } else { - return Err(TxInsersionError::NonceConflict); - } - } - btree_map::Entry::Vacant(entry) => *entry.insert(()), - } - - ReplacedState::NotReplaced - }; - - let position = if self.front_nonce >= mempool_tx_nonce { - // We insrted at the front here - let former_head_arrived_at = core::mem::replace(&mut self.front_arrived_at, mempool_tx_arrived_at); - self.front_nonce = mempool_tx_nonce; - self.front_tx_hash = mempool_tx_hash; - InsertedPosition::Front { former_head_arrived_at } - } else { - InsertedPosition::Other - }; - - #[cfg(debug_assertions)] // unknown field `front_tx_hash` in release if debug_assert_eq is used - assert_eq!( - self.transactions.first_key_value().expect("Getting the first tx").0 .0.tx_hash(), - self.front_tx_hash - ); - - Ok((position, replaced)) - } - - pub fn pop(&mut self) -> (MempoolTransaction, NonceChainNewState) { - // TODO(perf): avoid double lookup - let (tx, _) = self.transactions.pop_first().expect("Nonce chain should not be empty"); - if let Some((new_front, _)) = self.transactions.first_key_value() { - self.front_arrived_at = new_front.0.arrived_at; - #[cfg(debug_assertions)] - { - self.front_tx_hash = new_front.0.tx_hash(); - } - self.front_nonce = new_front.0.nonce(); - (tx.0, NonceChainNewState::NotEmpty) - } else { - (tx.0, NonceChainNewState::Empty) - } - } -} - -#[derive(Clone, Debug)] -struct AccountOrderedByTimestamp { - contract_addr: ContractAddress, - timestamp: ArrivedAtTimestamp, -} - -impl PartialEq for AccountOrderedByTimestamp { - fn eq(&self, other: &Self) -> bool { - self.cmp(other).is_eq() - } -} -impl Eq for AccountOrderedByTimestamp {} -impl Ord for AccountOrderedByTimestamp { - fn cmp(&self, other: &Self) -> cmp::Ordering { - // Important: Fallback on contract addr here. - // There can be timestamp collisions. - self.timestamp.cmp(&other.timestamp).then_with(|| self.contract_addr.cmp(&other.contract_addr)) - } -} -impl PartialOrd for AccountOrderedByTimestamp { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// This is used for quickly checking if the contract has been deployed for the same block it is invoked. -/// When force inserting transaction, it may happen that we run into a duplicate deploy_account transaction. Keep a count for that purpose. -#[derive(Debug, Clone, Default)] -struct DeployedContracts(HashMap); -impl DeployedContracts { - fn decrement(&mut self, address: ContractAddress) { - match self.0.entry(address) { - hash_map::Entry::Occupied(mut entry) => { - *entry.get_mut() -= 1; - if entry.get() == &0 { - entry.remove(); - } - } - hash_map::Entry::Vacant(_) => unreachable!("invariant violated"), - } - } - fn increment(&mut self, address: ContractAddress) { - *self.0.entry(address).or_insert(0) += 1 - } - #[cfg(test)] - fn is_empty(&self) -> bool { - self.0.is_empty() - } - fn contains(&self, address: &ContractAddress) -> bool { - self.0.contains_key(address) - } -} - -#[derive(Default, Debug)] -/// Invariants: -/// - Every nonce chain in `nonce_chains` should have a one to one match with `tx_queue`. -/// - Every [`AccountTransaction::DeployAccount`] transaction should have a one to one match with `deployed_contracts`. -/// - See [`NonceChain`] invariants. -pub struct MempoolInner { - /// We have one nonce chain per contract address. - nonce_chains: HashMap, - /// FCFS queue. 
- tx_queue: BTreeSet, - deployed_contracts: DeployedContracts, -} - -#[derive(thiserror::Error, Debug, PartialEq, Eq)] -pub enum TxInsersionError { - #[error("A transaction with this nonce already exists in the transaction pool")] - NonceConflict, - #[error("A transaction with this hash already exists in the transaction pool")] - DuplicateTxn, -} - -impl MempoolInner { - #[cfg(test)] - pub fn check_invariants(&self) { - self.nonce_chains.values().for_each(NonceChain::check_invariants); - let mut tx_queue = self.tx_queue.clone(); - for (k, v) in &self.nonce_chains { - assert!(tx_queue.remove(&AccountOrderedByTimestamp { contract_addr: *k, timestamp: v.front_arrived_at })) - } - assert!(tx_queue.is_empty()); - let mut deployed_contracts = self.deployed_contracts.clone(); - for (contract, _) in self.nonce_chains.values().flat_map(|chain| &chain.transactions) { - if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &contract.0.tx { - deployed_contracts.decrement(tx.contract_address) - } - } - assert!(deployed_contracts.is_empty(), "remaining deployed_contracts: {deployed_contracts:?}"); - } - - /// When `force` is `true`, this function should never return any error. - pub fn insert_tx(&mut self, mempool_tx: MempoolTransaction, force: bool) -> Result<(), TxInsersionError> { - let contract_addr = mempool_tx.contract_address(); - let arrived_at = mempool_tx.arrived_at; - let deployed_contract_address = - if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { - Some(tx.contract_address) - } else { - None - }; - - let is_replaced = match self.nonce_chains.entry(contract_addr) { - hash_map::Entry::Occupied(mut entry) => { - // Handle nonce collision. - let (position, is_replaced) = match entry.get_mut().insert(mempool_tx, force) { - Ok(position) => position, - Err(nonce_collision_or_duplicate_hash) => { - if force { - panic!("Force add should never error") - } - return Err(nonce_collision_or_duplicate_hash); - } - }; - - match position { - InsertedPosition::Front { former_head_arrived_at } => { - // If we inserted at the front, it has invalidated the tx queue. Update the tx queue. - let removed = self - .tx_queue - .remove(&AccountOrderedByTimestamp { contract_addr, timestamp: former_head_arrived_at }); - debug_assert!(removed); - let inserted = - self.tx_queue.insert(AccountOrderedByTimestamp { contract_addr, timestamp: arrived_at }); - debug_assert!(inserted); - } - InsertedPosition::Other => { - // No need to update the tx queue. - } - } - is_replaced - } - hash_map::Entry::Vacant(entry) => { - // Insert the new nonce chain - let nonce_chain = NonceChain::new_with_first_tx(mempool_tx); - entry.insert(nonce_chain); - - // Also update the tx queue. - let inserted = self.tx_queue.insert(AccountOrderedByTimestamp { contract_addr, timestamp: arrived_at }); - debug_assert!(inserted); - - ReplacedState::NotReplaced - } - }; - - if is_replaced != ReplacedState::Replaced { - if let Some(contract_address) = &deployed_contract_address { - self.deployed_contracts.increment(*contract_address) - } - } - - Ok(()) - } - - pub fn has_deployed_contract(&self, addr: &ContractAddress) -> bool { - self.deployed_contracts.contains(addr) - } - - pub fn pop_next(&mut self) -> Option { - // Pop tx queue. - let tx_queue_account = self.tx_queue.pop_first()?; // Bubble up None if the mempool is empty. - - // Update nonce chain. 
- let nonce_chain = - self.nonce_chains.get_mut(&tx_queue_account.contract_addr).expect("Nonce chain does not match tx queue"); - let (mempool_tx, nonce_chain_new_state) = nonce_chain.pop(); - match nonce_chain_new_state { - NonceChainNewState::Empty => { - // Remove the nonce chain. - let removed = self.nonce_chains.remove(&tx_queue_account.contract_addr); - debug_assert!(removed.is_some()); - } - NonceChainNewState::NotEmpty => { - // Re-add to tx queue. - let inserted = self.tx_queue.insert(AccountOrderedByTimestamp { - contract_addr: tx_queue_account.contract_addr, - timestamp: nonce_chain.front_arrived_at, - }); - debug_assert!(inserted); - } - } - - // Update deployed contracts. - if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { - self.deployed_contracts.decrement(tx.contract_address); - } - - Some(mempool_tx) - } - - pub fn pop_next_chunk(&mut self, dest: &mut impl Extend, n: usize) { - dest.extend((0..n).map_while(|_| self.pop_next())) - } - - pub fn re_add_txs(&mut self, txs: impl IntoIterator) { - for tx in txs { - let force = true; - self.insert_tx(tx, force).expect("Force insert tx should not error"); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use blockifier::{ - execution::contract_class::ClassInfo, - test_utils::{contracts::FeatureContract, CairoVersion}, - transaction::{transaction_execution::Transaction, transaction_types::TransactionType}, - }; - use mc_exec::execution::TxInfo; - use mp_convert::ToFelt; - use proptest::prelude::*; - use proptest_derive::Arbitrary; - use starknet_api::{ - core::{calculate_contract_address, ChainId}, - data_availability::DataAvailabilityMode, - transaction::{ - ContractAddressSalt, DeclareTransactionV3, DeployAccountTransactionV3, InvokeTransactionV3, Resource, - ResourceBounds, ResourceBoundsMapping, TransactionHasher, TransactionVersion, - }, - }; - use starknet_types_core::felt::Felt; - - use blockifier::abi::abi_utils::selector_from_name; - use starknet_api::transaction::Fee; - use std::{collections::HashSet, fmt, time::Duration}; - - lazy_static::lazy_static! 
{ - static ref DUMMY_CLASS: ClassInfo = { - let dummy_contract_class = FeatureContract::TestContract(CairoVersion::Cairo1); - ClassInfo::new(&dummy_contract_class.get_class(), 100, 100).unwrap() - }; - } - - struct Insert(MempoolTransaction, /* force */ bool); - impl fmt::Debug for Insert { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Insert(ty={:?},arrived_at={:?},tx_hash={:?},contract_address={:?},nonce={:?},force={:?})", - self.0.tx.tx_type(), - self.0.arrived_at, - self.0.tx_hash(), - self.0.contract_address(), - self.0.nonce(), - self.1, - ) - } - } - impl Arbitrary for Insert { - type Parameters = (); - type Strategy = BoxedStrategy; - - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - #[derive(Debug, Arbitrary)] - enum TxTy { - Declare, - DeployAccount, - InvokeFunction, - L1Handler, - } - - <(TxTy, u8, u8, u8, bool)>::arbitrary() - .prop_map(|(ty, arrived_at, contract_address, nonce, force)| { - let arrived_at = SystemTime::UNIX_EPOCH + Duration::from_millis(arrived_at.into()); - let contract_addr = ContractAddress::try_from(Felt::from(contract_address)).unwrap(); - let nonce = Nonce(Felt::from(nonce)); - - let resource_bounds = ResourceBoundsMapping( - [ - (Resource::L1Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), - (Resource::L2Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), - ] - .into(), - ); - - let tx = match ty { - TxTy::Declare => starknet_api::transaction::Transaction::Declare( - starknet_api::transaction::DeclareTransaction::V3(DeclareTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - class_hash: Default::default(), - compiled_class_hash: Default::default(), - sender_address: contract_addr, - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - account_deployment_data: Default::default(), - }), - ), - TxTy::DeployAccount => starknet_api::transaction::Transaction::DeployAccount( - starknet_api::transaction::DeployAccountTransaction::V3(DeployAccountTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - class_hash: Default::default(), - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - contract_address_salt: ContractAddressSalt(contract_addr.to_felt()), - constructor_calldata: Default::default(), - }), - ), - TxTy::InvokeFunction => starknet_api::transaction::Transaction::Invoke( - starknet_api::transaction::InvokeTransaction::V3(InvokeTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - sender_address: contract_addr, - calldata: Default::default(), - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - account_deployment_data: Default::default(), - }), - ), - // TODO: maybe update the values? 
- TxTy::L1Handler => starknet_api::transaction::Transaction::L1Handler( - starknet_api::transaction::L1HandlerTransaction { - version: TransactionVersion::ZERO, - nonce, - contract_address: contract_addr, - entry_point_selector: selector_from_name("l1_handler_set_value"), - calldata: Default::default(), - }, - ), - }; - - let deployed = if let starknet_api::transaction::Transaction::DeployAccount(tx) = &tx { - Some( - calculate_contract_address( - tx.contract_address_salt(), - Default::default(), - &Default::default(), - Default::default(), - ) - .unwrap(), - ) - } else { - None - }; - - // providing dummy l1 gas for now - let l1_gas_paid = match &tx { - starknet_api::transaction::Transaction::L1Handler(_) => Some(Fee(1)), - _ => None, - }; - - let tx_hash = tx.calculate_transaction_hash(&ChainId::Mainnet, &TransactionVersion::THREE).unwrap(); - - let tx = - Transaction::from_api(tx, tx_hash, Some(DUMMY_CLASS.clone()), l1_gas_paid, deployed, false) - .unwrap(); - - Insert(MempoolTransaction { tx, arrived_at, converted_class: None }, force) - }) - .boxed() - } - } - - #[derive(Debug, Arbitrary)] - enum Operation { - Insert(Insert), - Pop, - } - - #[derive(Debug, Arbitrary)] - struct MempoolInvariantsProblem(Vec); - impl MempoolInvariantsProblem { - fn check(&self) { - tracing::debug!("\n\n\n\n\nCase: {:#?}", self); - let mut mempool = MempoolInner::default(); - mempool.check_invariants(); - - let mut inserted = HashSet::new(); - let mut inserted_contract_nonce_pairs = HashSet::new(); - let mut new_contracts = HashSet::new(); - - let handle_pop = |res: Option, - inserted: &mut HashSet, - inserted_contract_nonce_pairs: &mut HashSet<(Nonce, ContractAddress)>, - new_contracts: &mut HashSet| { - if let Some(res) = &res { - let removed = inserted.remove(&res.tx_hash()); - assert!(removed); - let removed = inserted_contract_nonce_pairs.remove(&(res.nonce(), res.contract_address())); - assert!(removed); - - if res.tx.tx_type() == TransactionType::DeployAccount { - let _removed = new_contracts.remove(&res.contract_address()); - // there can be multiple deploy_account txs. 
- // assert!(removed) - } - } else { - assert!(inserted.is_empty()) - } - tracing::trace!("Popped {:?}", res.map(|el| Insert(el, false))); - }; - - for op in &self.0 { - match op { - Operation::Insert(insert) => { - let force = insert.1; - tracing::trace!("Insert {:?}", insert); - let res = mempool.insert_tx(insert.0.clone(), insert.1); - - let expected = if !force - && inserted_contract_nonce_pairs.contains(&(insert.0.nonce(), insert.0.contract_address())) - { - if inserted.contains(&insert.0.tx_hash()) { - Err(TxInsersionError::DuplicateTxn) - } else { - Err(TxInsersionError::NonceConflict) - } - } else { - Ok(()) - }; - - assert_eq!(expected, res); - - if expected.is_ok() { - if insert.0.tx.tx_type() == TransactionType::DeployAccount { - new_contracts.insert(insert.0.contract_address()); - } - inserted.insert(insert.0.tx_hash()); - inserted_contract_nonce_pairs.insert((insert.0.nonce(), insert.0.contract_address())); - } - - tracing::trace!("Result {:?}", res); - } - Operation::Pop => { - tracing::trace!("Pop"); - let res = mempool.pop_next(); - handle_pop(res, &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); - } - } - tracing::trace!("State: {mempool:#?}"); - mempool.check_invariants(); - } - - loop { - tracing::trace!("Pop"); - let Some(res) = mempool.pop_next() else { break }; - handle_pop(Some(res), &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); - mempool.check_invariants(); - } - assert!(inserted.is_empty()); - assert!(inserted_contract_nonce_pairs.is_empty()); - assert!(new_contracts.is_empty()); - tracing::trace!("Done :)"); - } - } - - proptest::proptest! { - #![proptest_config(ProptestConfig::with_cases(5))] // comment this when developing, this is mostly for faster ci & whole workspace `cargo test` - #[test] - fn proptest_mempool(pb in any::()) { - let _ = tracing_subscriber::fmt() - .with_max_level(tracing::Level::TRACE) - .with_test_writer() - .try_init(); - - pb.check(); - } - } -} diff --git a/crates/client/mempool/src/inner/deployed_contracts.rs b/crates/client/mempool/src/inner/deployed_contracts.rs new file mode 100644 index 000000000..546d94566 --- /dev/null +++ b/crates/client/mempool/src/inner/deployed_contracts.rs @@ -0,0 +1,32 @@ +use std::collections::{hash_map, HashMap}; + +use starknet_api::core::ContractAddress; + +/// This is used for quickly checking if the contract has been deployed for the same block it is invoked. +/// When force inserting transaction, it may happen that we run into a duplicate deploy_account transaction. Keep a count for that purpose. +#[derive(Debug, Clone, Default)] +pub struct DeployedContracts(HashMap); +impl DeployedContracts { + pub fn decrement(&mut self, address: ContractAddress) { + match self.0.entry(address) { + hash_map::Entry::Occupied(mut entry) => { + *entry.get_mut() -= 1; + if entry.get() == &0 { + // Count is now 0, we can delete the entry. 
+ entry.remove(); + } + } + hash_map::Entry::Vacant(_) => unreachable!("invariant violated"), + } + } + pub fn increment(&mut self, address: ContractAddress) { + *self.0.entry(address).or_insert(0) += 1 + } + #[cfg(test)] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + pub fn contains(&self, address: &ContractAddress) -> bool { + self.0.contains_key(address) + } +} diff --git a/crates/client/mempool/src/inner/limits.rs b/crates/client/mempool/src/inner/limits.rs new file mode 100644 index 000000000..4749a0caa --- /dev/null +++ b/crates/client/mempool/src/inner/limits.rs @@ -0,0 +1,147 @@ +use std::time::{Duration, SystemTime}; + +use blockifier::transaction::transaction_types::TransactionType; + +use mc_exec::execution::TxInfo; +use mp_chain_config::ChainConfig; + +use crate::MempoolTransaction; + +#[derive(Debug)] +pub struct MempoolLimits { + pub max_transactions: usize, + pub max_declare_transactions: usize, + pub max_age: Duration, +} + +impl MempoolLimits { + pub fn new(chain_config: &ChainConfig) -> Self { + Self { + max_transactions: chain_config.mempool_tx_limit, + max_declare_transactions: chain_config.mempool_declare_tx_limit, + max_age: chain_config.mempool_tx_max_age, + } + } + #[cfg(any(test, feature = "testing"))] + pub fn for_testing() -> Self { + Self { + max_age: Duration::from_secs(10000000), + max_declare_transactions: usize::MAX, + max_transactions: usize::MAX, + } + } +} + +/// Note: when a transaction is popped from the mempool by block prod, the limits will not be updated until the full +/// tick has been executed and excess transactions are added back into the mempool. +/// This means that the inner mempool may have fewer transactions than what the limits say at a given time. +#[derive(Debug)] +pub(crate) struct MempoolLimiter { + pub config: MempoolLimits, + current_transactions: usize, + current_declare_transactions: usize, +} + +#[derive(thiserror::Error, Debug, PartialEq, Eq)] +pub enum MempoolLimitReached { + #[error("The mempool has reached the limit of {max} transactions")] + MaxTransactions { max: usize }, + #[error("The mempool has reached the limit of {max} declare transactions")] + MaxDeclareTransactions { max: usize }, + #[error("The transaction age is greater than the limit of {max:?}")] + Age { max: Duration }, +} + +pub(crate) struct TransactionCheckedLimits { + check_tx_limit: bool, + check_declare_limit: bool, + check_age: bool, + tx_arrived_at: SystemTime, +} + +impl TransactionCheckedLimits { + // Returns which limits apply for this transaction. + // This struct is also used to update the limits after insertion, without having to keep a clone of the transaction around. + // We can add more limits here as needed :) + pub fn limits_for(tx: &MempoolTransaction) -> Self { + match tx.tx.tx_type() { + TransactionType::Declare => TransactionCheckedLimits { + check_tx_limit: true, + check_declare_limit: true, + check_age: true, + tx_arrived_at: tx.arrived_at, + }, + TransactionType::DeployAccount => TransactionCheckedLimits { + check_tx_limit: true, + check_declare_limit: false, + check_age: true, + tx_arrived_at: tx.arrived_at, + }, + TransactionType::InvokeFunction => TransactionCheckedLimits { + check_tx_limit: true, + check_declare_limit: false, + check_age: true, + tx_arrived_at: tx.arrived_at, + }, + // L1 handler transactions are transactions submitted through the L1 core contract. We don't want to miss + // any of those if possible.
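+ // (Age is deliberately left unchecked below as well: a late L1 handler message is better included late than dropped.)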
+ TransactionType::L1Handler => TransactionCheckedLimits { + check_tx_limit: false, + check_declare_limit: false, + check_age: false, + tx_arrived_at: tx.arrived_at, + }, + } + } +} + +impl MempoolLimiter { + pub fn new(limits: MempoolLimits) -> Self { + Self { config: limits, current_transactions: 0, current_declare_transactions: 0 } + } + + pub fn check_insert_limits(&self, to_check: &TransactionCheckedLimits) -> Result<(), MempoolLimitReached> { + // tx limit + if to_check.check_tx_limit && self.current_transactions >= self.config.max_transactions { + return Err(MempoolLimitReached::MaxTransactions { max: self.config.max_transactions }); + } + + // declare tx limit + if to_check.check_declare_limit && self.current_declare_transactions >= self.config.max_declare_transactions { + return Err(MempoolLimitReached::MaxDeclareTransactions { max: self.config.max_declare_transactions }); + } + + // age + if self.tx_age_exceeded(to_check) { + return Err(MempoolLimitReached::Age { max: self.config.max_age }); + } + + Ok(()) + } + + pub fn tx_age_exceeded(&self, to_check: &TransactionCheckedLimits) -> bool { + if to_check.check_age { + let current_time = SystemTime::now(); + if to_check.tx_arrived_at < current_time.checked_sub(self.config.max_age).unwrap_or(SystemTime::UNIX_EPOCH) + { + return true; + } + } + false + } + + pub fn update_tx_limits(&mut self, limits: &TransactionCheckedLimits) { + // We want all transactions to count toward the limit, not just those where the limit is checked. + self.current_transactions += 1; + if limits.check_declare_limit { + self.current_declare_transactions += 1; + } + } + + pub fn mark_removed(&mut self, to_update: &TransactionCheckedLimits) { + // These should not overflow unless block prod marks transactions as consumed even though they have not been popped. + self.current_transactions -= 1; + if to_update.check_declare_limit { + self.current_declare_transactions -= 1; + } + } +} diff --git a/crates/client/mempool/src/inner/mod.rs b/crates/client/mempool/src/inner/mod.rs new file mode 100644 index 000000000..7e55313d9 --- /dev/null +++ b/crates/client/mempool/src/inner/mod.rs @@ -0,0 +1,269 @@ +//! The inner mempool does not perform validation, and is expected to be stored into a RwLock or Mutex. +//! This is the chokepoint for all insertions and popping, as such, we want to make it as fast as possible. +//! Insertion and popping should be O(log n). +//! We also really don't want to poison the lock by panicking. + +use blockifier::transaction::account_transaction::AccountTransaction; +use blockifier::transaction::transaction_execution::Transaction; +use deployed_contracts::DeployedContracts; +use mp_convert::ToFelt; +use nonce_chain::{InsertedPosition, NonceChain, NonceChainNewState, ReplacedState}; +use starknet_api::core::ContractAddress; +use starknet_types_core::felt::Felt; +use std::{ + cmp, + collections::{hash_map, BTreeSet, HashMap}, +}; + +mod deployed_contracts; +mod limits; +mod nonce_chain; +mod proptest; +mod tx; + +pub use limits::*; +pub use tx::*; + +#[derive(Clone, Debug, PartialEq, Eq)] +struct AccountOrderedByTimestamp { + contract_addr: Felt, + timestamp: ArrivedAtTimestamp, +} + +impl Ord for AccountOrderedByTimestamp { + fn cmp(&self, other: &Self) -> cmp::Ordering { + // Important: Fallback on contract addr here. + // There can be timestamp collisions. 
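+ // BTreeSet treats Ordering::Equal as a duplicate, so without the fallback one of two accounts with identical timestamps would silently drop out of the queue.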
diff --git a/crates/client/mempool/src/inner/mod.rs b/crates/client/mempool/src/inner/mod.rs new file mode 100644 index 000000000..7e55313d9 --- /dev/null +++ b/crates/client/mempool/src/inner/mod.rs @@ -0,0 +1,269 @@ +//! The inner mempool does not perform validation, and is expected to be stored in a RwLock or Mutex. +//! This is the chokepoint for all insertions and popping; as such, we want to make it as fast as possible. +//! Insertion and popping should be O(log n). +//! We also really don't want to poison the lock by panicking. + +use blockifier::transaction::account_transaction::AccountTransaction; +use blockifier::transaction::transaction_execution::Transaction; +use deployed_contracts::DeployedContracts; +use mp_convert::ToFelt; +use nonce_chain::{InsertedPosition, NonceChain, NonceChainNewState, ReplacedState}; +use starknet_api::core::ContractAddress; +use starknet_types_core::felt::Felt; +use std::{ + cmp, + collections::{hash_map, BTreeSet, HashMap}, +}; + +mod deployed_contracts; +mod limits; +mod nonce_chain; +mod proptest; +mod tx; + +pub use limits::*; +pub use tx::*; + +#[derive(Clone, Debug, PartialEq, Eq)] +struct AccountOrderedByTimestamp { + contract_addr: Felt, + timestamp: ArrivedAtTimestamp, +} + +impl Ord for AccountOrderedByTimestamp { + fn cmp(&self, other: &Self) -> cmp::Ordering { + // Important: fall back on the contract addr here. + // There can be timestamp collisions. + self.timestamp.cmp(&other.timestamp).then_with(|| self.contract_addr.cmp(&other.contract_addr)) + } +} +impl PartialOrd for AccountOrderedByTimestamp { + fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { + Some(self.cmp(other)) + } +} + +#[derive(Debug)] +/// Invariants: +/// - Every nonce chain in `nonce_chains` should have a one-to-one match with `tx_queue`. +/// - Every [`AccountTransaction::DeployAccount`] transaction should have a one-to-one match with `deployed_contracts`. +/// - See [`NonceChain`] invariants. +pub(crate) struct MempoolInner { + /// We have one nonce chain per contract address. + nonce_chains: HashMap<Felt, NonceChain>, + /// FCFS queue. + tx_queue: BTreeSet<AccountOrderedByTimestamp>, + deployed_contracts: DeployedContracts, + limiter: MempoolLimiter, +} + +#[derive(thiserror::Error, Debug, PartialEq, Eq)] +pub enum TxInsersionError { + #[error("A transaction with this nonce already exists in the transaction pool")] + NonceConflict, + #[error("A transaction with this hash already exists in the transaction pool")] + DuplicateTxn, + #[error(transparent)] + Limit(#[from] MempoolLimitReached), +} + +impl MempoolInner { + pub fn new(limits_config: MempoolLimits) -> Self { + Self { + nonce_chains: Default::default(), + tx_queue: Default::default(), + deployed_contracts: Default::default(), + limiter: MempoolLimiter::new(limits_config), + } + } + + #[cfg(test)] + pub fn check_invariants(&self) { + self.nonce_chains.values().for_each(NonceChain::check_invariants); + let mut tx_queue = self.tx_queue.clone(); + for (k, v) in &self.nonce_chains { + assert!(tx_queue.remove(&AccountOrderedByTimestamp { contract_addr: *k, timestamp: v.front_arrived_at })) + } + assert_eq!(tx_queue, Default::default()); + let mut deployed_contracts = self.deployed_contracts.clone(); + for (contract, _) in self.nonce_chains.values().flat_map(|chain| &chain.transactions) { + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &contract.0.tx { + deployed_contracts.decrement(tx.contract_address) + } + } + assert!(deployed_contracts.is_empty(), "remaining deployed_contracts: {deployed_contracts:?}"); + } + + /// When `force` is `true`, this function should never return any error. + pub fn insert_tx(&mut self, mempool_tx: MempoolTransaction, force: bool) -> Result<(), TxInsersionError> { + // delete age-exceeded txs from the mempool + // todo(perf): we may want to run this check only once every few seconds to keep it out of the hot path + self.remove_age_exceeded_txs(); + + // check limits + let limits_for_tx = TransactionCheckedLimits::limits_for(&mempool_tx); + if !force { + self.limiter.check_insert_limits(&limits_for_tx)?; + } + + let contract_addr = mempool_tx.contract_address().to_felt(); + let arrived_at = mempool_tx.arrived_at; + let deployed_contract_address = + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { + Some(tx.contract_address) + } else { + None + }; + + let is_replaced = match self.nonce_chains.entry(contract_addr) { + hash_map::Entry::Occupied(mut entry) => { + // Handle nonce collision. + let chain: &mut NonceChain = entry.get_mut(); + let (position, is_replaced) = match chain.insert(mempool_tx, force) { + Ok(position) => position, + Err(nonce_collision_or_duplicate_hash) => { + debug_assert!(!force); // Force add should never error. + return Err(nonce_collision_or_duplicate_hash); + } + }; + + match position { + InsertedPosition::Front { former_head_arrived_at } => { + // If we inserted at the front, it has invalidated the tx queue.
Update the tx queue. + let removed = self + .tx_queue + .remove(&AccountOrderedByTimestamp { contract_addr, timestamp: former_head_arrived_at }); + debug_assert!(removed); + let inserted = + self.tx_queue.insert(AccountOrderedByTimestamp { contract_addr, timestamp: arrived_at }); + debug_assert!(inserted); + } + InsertedPosition::Other => { + // No need to update the tx queue. + } + } + is_replaced + } + hash_map::Entry::Vacant(entry) => { + // Insert the new nonce chain + let nonce_chain = NonceChain::new_with_first_tx(mempool_tx); + entry.insert(nonce_chain); + + // Also update the tx queue. + let inserted = self.tx_queue.insert(AccountOrderedByTimestamp { contract_addr, timestamp: arrived_at }); + debug_assert!(inserted); + + ReplacedState::NotReplaced + } + }; + + if let ReplacedState::Replaced { previous } = is_replaced { + // Mark the previous transaction as deleted + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&previous)); + } else if let Some(contract_address) = &deployed_contract_address { + self.deployed_contracts.increment(*contract_address) + } + + // Update transaction limits + self.limiter.update_tx_limits(&limits_for_tx); + + Ok(()) + } + + pub fn has_deployed_contract(&self, addr: &ContractAddress) -> bool { + self.deployed_contracts.contains(addr) + } + + fn pop_tx_queue_account(&mut self, tx_queue_account: &AccountOrderedByTimestamp) -> MempoolTransaction { + // Update nonce chain. + let nonce_chain = + self.nonce_chains.get_mut(&tx_queue_account.contract_addr).expect("Nonce chain does not match tx queue"); + let (mempool_tx, nonce_chain_new_state) = nonce_chain.pop(); + match nonce_chain_new_state { + NonceChainNewState::Empty => { + // Remove the nonce chain. + let removed = self.nonce_chains.remove(&tx_queue_account.contract_addr); + debug_assert!(removed.is_some()); + } + NonceChainNewState::NotEmpty => { + // Re-add to tx queue. + let inserted = self.tx_queue.insert(AccountOrderedByTimestamp { + contract_addr: tx_queue_account.contract_addr, + timestamp: nonce_chain.front_arrived_at, + }); + debug_assert!(inserted); + } + } + + // Update deployed contracts. + if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { + self.deployed_contracts.decrement(tx.contract_address); + } + + mempool_tx + } + + pub fn remove_age_exceeded_txs(&mut self) { + // Pop tx queue. + // Too bad there's no first_entry API; we should check whether hashbrown has one, to avoid the double lookup. + while let Some(tx_queue_account) = self.tx_queue.first() { + let tx_queue_account = tx_queue_account.clone(); // clone is cheap for this struct + let nonce_chain = self + .nonce_chains + .get_mut(&tx_queue_account.contract_addr) + .expect("Nonce chain does not match tx queue"); + let (k, _v) = nonce_chain.transactions.first_key_value().expect("Nonce chain without a tx"); + + if self.limiter.tx_age_exceeded(&TransactionCheckedLimits::limits_for(&k.0)) { + let tx = self.pop_tx_queue_account(&tx_queue_account); + let _res = self.tx_queue.pop_first().expect("Cannot be empty, checked just above"); + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&tx)); + } else { + break; + } + } + } + + pub fn pop_next(&mut self) -> Option<MempoolTransaction> { + // Pop tx queue. + let mempool_tx = loop { + let tx_queue_account = self.tx_queue.pop_first()?; // Bubble up None if the mempool is empty.
+ let mempool_tx = self.pop_tx_queue_account(&tx_queue_account); + + let limits = TransactionCheckedLimits::limits_for(&mempool_tx); + if !self.limiter.tx_age_exceeded(&limits) { + break mempool_tx; + } + + self.limiter.mark_removed(&limits); + }; + + // Do not update the mempool limits here; block prod will update them when it re-adds txs. + Some(mempool_tx) + } + + pub fn pop_next_chunk(&mut self, dest: &mut impl Extend<MempoolTransaction>, n: usize) { + dest.extend((0..n).map_while(|_| self.pop_next())) + } + + /// This is called by block production after a batch of transactions has been executed. + /// Marks the consumed txs as removed, and re-adds the transactions that were not consumed back into the mempool. + pub fn re_add_txs( + &mut self, + txs: impl IntoIterator<Item = MempoolTransaction>, + consumed_txs: impl IntoIterator<Item = MempoolTransaction>, + ) { + for tx in consumed_txs { + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&tx)) + } + for tx in txs { + let force = true; + self.insert_tx(tx, force).expect("Force insert tx should not error"); + } + } + + #[cfg(any(test, feature = "testing"))] + pub fn is_empty(&self) -> bool { + self.tx_queue.is_empty() + } +}
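The inner mempool above is a two-level structure: per-account nonce chains, plus one `tx_queue` entry per account, ordered by the arrival time of that account's front transaction. A minimal sketch of why the `Ord` fallback on the contract address matters, with `u64`/`u32` standing in for `SystemTime` and `Felt` (this is an illustration, not the crate's types):

use std::collections::BTreeSet;

fn main() {
    // Model of `tx_queue`: tuples ordered first by timestamp, then by address.
    // The address tie-break is what keeps two accounts with colliding timestamps
    // from being treated as the same set element.
    let mut tx_queue: BTreeSet<(u64, u32)> = BTreeSet::new();
    assert!(tx_queue.insert((10, 7)));
    assert!(tx_queue.insert((10, 3))); // timestamp collision: still a distinct entry
    assert!(tx_queue.insert((5, 9)));
    // pop_next-style draining is oldest-front-transaction first (FCFS across accounts).
    assert_eq!(tx_queue.pop_first(), Some((5, 9)));
    assert_eq!(tx_queue.pop_first(), Some((10, 3)));
    assert_eq!(tx_queue.pop_first(), Some((10, 7)));
}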
diff --git a/crates/client/mempool/src/inner/nonce_chain.rs b/crates/client/mempool/src/inner/nonce_chain.rs new file mode 100644 index 000000000..501d4cfbd --- /dev/null +++ b/crates/client/mempool/src/inner/nonce_chain.rs @@ -0,0 +1,149 @@ +use super::tx::{ArrivedAtTimestamp, MempoolTransaction}; +use crate::TxInsersionError; +use starknet_api::{core::Nonce, transaction::TransactionHash}; +use std::collections::{btree_map, BTreeMap}; +use std::{cmp, iter}; + +#[derive(Debug, Clone)] +pub struct OrderMempoolTransactionByNonce(pub MempoolTransaction); + +impl PartialEq for OrderMempoolTransactionByNonce { + fn eq(&self, other: &Self) -> bool { + self.cmp(other).is_eq() + } +} +impl Eq for OrderMempoolTransactionByNonce {} +impl Ord for OrderMempoolTransactionByNonce { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.0.nonce().cmp(&other.0.nonce()) + } +} +impl PartialOrd for OrderMempoolTransactionByNonce { + fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { + Some(self.cmp(other)) + } +} + +/// Invariants: +/// - front_nonce, front_arrived_at and front_tx_hash must match the front transaction. +/// - No nonce chain should ever be empty in the mempool. +#[derive(Debug)] +pub struct NonceChain { + /// Use a BTreeMap so that we can use the entry API. + // TODO(perf): to avoid some double lookups here, we should remove the `OrderMempoolTransactionByNonce` struct + // and make this a BTreeMap<Nonce, MempoolTransaction> + pub(crate) transactions: BTreeMap<OrderMempoolTransactionByNonce, ()>, + pub(crate) front_arrived_at: ArrivedAtTimestamp, + pub(crate) front_nonce: Nonce, + pub(crate) front_tx_hash: TransactionHash, +} + +#[derive(Eq, PartialEq, Debug)] +pub enum InsertedPosition { + Front { former_head_arrived_at: ArrivedAtTimestamp }, + Other, +} + +#[derive(Debug)] +pub enum ReplacedState { + Replaced { previous: MempoolTransaction }, + NotReplaced, +} + +#[derive(Eq, PartialEq, Debug)] +pub enum NonceChainNewState { + Empty, + NotEmpty, +} + +impl NonceChain { + pub fn new_with_first_tx(tx: MempoolTransaction) -> Self { + Self { + front_arrived_at: tx.arrived_at, + front_tx_hash: tx.tx_hash(), + front_nonce: tx.nonce(), + transactions: iter::once((OrderMempoolTransactionByNonce(tx), ())).collect(), + } + } + + #[cfg(test)] + pub fn check_invariants(&self) { + assert!(!self.transactions.is_empty()); + let (front, _) = self.transactions.first_key_value().unwrap(); + assert_eq!(front.0.tx_hash(), self.front_tx_hash); + assert_eq!(front.0.nonce(), self.front_nonce); + assert_eq!(front.0.arrived_at, self.front_arrived_at); + } + + /// Returns where in the chain it was inserted. + /// When `force` is `true`, this function should never return any error. + pub fn insert( + &mut self, + mempool_tx: MempoolTransaction, + force: bool, + ) -> Result<(InsertedPosition, ReplacedState), TxInsersionError> { + let mempool_tx_arrived_at = mempool_tx.arrived_at; + let mempool_tx_nonce = mempool_tx.nonce(); + let mempool_tx_hash = mempool_tx.tx_hash(); + + let replaced = if force { + // Double lookup here, unfortunately: we're using the keys in a hacky way and can't update the + // entry key using the entry API. + let mempool_tx = OrderMempoolTransactionByNonce(mempool_tx); + if let Some((previous, _)) = self.transactions.remove_entry(&mempool_tx) { + let previous = previous.0.clone(); + let inserted = self.transactions.insert(mempool_tx, ()); + debug_assert!(inserted.is_none()); + ReplacedState::Replaced { previous } + } else { + let inserted = self.transactions.insert(mempool_tx, ()); + debug_assert!(inserted.is_none()); + ReplacedState::NotReplaced + } + } else { + match self.transactions.entry(OrderMempoolTransactionByNonce(mempool_tx)) { + btree_map::Entry::Occupied(entry) => { + // Duplicate nonce: either the hash is duplicated, or the nonce conflicts with another tx.
+ if entry.key().0.tx_hash() == mempool_tx_hash { + return Err(TxInsersionError::DuplicateTxn); + } else { + return Err(TxInsersionError::NonceConflict); + } + } + btree_map::Entry::Vacant(entry) => *entry.insert(()), + } + + ReplacedState::NotReplaced + }; + + let position = if self.front_nonce >= mempool_tx_nonce { + // We inserted at the front here + let former_head_arrived_at = core::mem::replace(&mut self.front_arrived_at, mempool_tx_arrived_at); + self.front_nonce = mempool_tx_nonce; + self.front_tx_hash = mempool_tx_hash; + InsertedPosition::Front { former_head_arrived_at } + } else { + InsertedPosition::Other + }; + + debug_assert_eq!( + self.transactions.first_key_value().expect("Getting the first tx").0 .0.tx_hash(), + self.front_tx_hash + ); + + Ok((position, replaced)) + } + + pub fn pop(&mut self) -> (MempoolTransaction, NonceChainNewState) { + // TODO(perf): avoid double lookup + let (tx, _) = self.transactions.pop_first().expect("Nonce chain should not be empty"); + if let Some((new_front, _)) = self.transactions.first_key_value() { + self.front_arrived_at = new_front.0.arrived_at; + self.front_tx_hash = new_front.0.tx_hash(); + self.front_nonce = new_front.0.nonce(); + (tx.0, NonceChainNewState::NotEmpty) + } else { + (tx.0, NonceChainNewState::Empty) + } + } +}
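A toy illustration of the front-tracking rule enforced by `insert` and `check_invariants` above: the cached front metadata must always describe the map's smallest nonce, so an insert at a smaller-or-equal nonce must replace the cached front and surface the former head's timestamp for the caller's queue fix-up. Plain `u64`s stand in for the real nonce and timestamp types:

use std::collections::BTreeMap;
use std::mem;

fn main() {
    // nonce -> arrived_at; the current front is cached alongside the map.
    let mut transactions: BTreeMap<u64, u64> = BTreeMap::new();
    let (mut front_nonce, mut front_arrived_at) = (5u64, 100u64);
    transactions.insert(front_nonce, front_arrived_at);

    // A tx with nonce 3 arrives later (t=200) but sorts before the current front.
    let (nonce, arrived_at) = (3u64, 200u64);
    transactions.insert(nonce, arrived_at);
    if nonce <= front_nonce {
        let former_head_arrived_at = mem::replace(&mut front_arrived_at, arrived_at);
        front_nonce = nonce;
        // The caller uses this value to remove the stale account-queue entry.
        assert_eq!(former_head_arrived_at, 100);
    }
    // Invariant: the cache matches the map's first entry.
    assert_eq!(transactions.first_key_value(), Some((&front_nonce, &front_arrived_at)));
}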
diff --git a/crates/client/mempool/src/inner/proptest.rs b/crates/client/mempool/src/inner/proptest.rs new file mode 100644 index 000000000..501f46e08 --- /dev/null +++ b/crates/client/mempool/src/inner/proptest.rs @@ -0,0 +1,269 @@ +#![cfg(test)] + +use super::*; +use ::proptest::prelude::*; +use blockifier::{ + execution::contract_class::ClassInfo, + test_utils::{contracts::FeatureContract, CairoVersion}, + transaction::{transaction_execution::Transaction, transaction_types::TransactionType}, +}; +use mc_exec::execution::TxInfo; +use mp_convert::ToFelt; +use proptest_derive::Arbitrary; +use starknet_api::{ + core::{calculate_contract_address, ChainId, Nonce}, + data_availability::DataAvailabilityMode, + transaction::{ + ContractAddressSalt, DeclareTransactionV3, DeployAccountTransactionV3, InvokeTransactionV3, Resource, + ResourceBounds, ResourceBoundsMapping, TransactionHash, TransactionHasher, TransactionVersion, + }, +}; +use starknet_types_core::felt::Felt; + +use blockifier::abi::abi_utils::selector_from_name; +use starknet_api::transaction::Fee; +use std::{ + collections::HashSet, + fmt, + time::{Duration, SystemTime}, +}; + +lazy_static::lazy_static! { + static ref DUMMY_CLASS: ClassInfo = { + let dummy_contract_class = FeatureContract::TestContract(CairoVersion::Cairo1); + ClassInfo::new(&dummy_contract_class.get_class(), 100, 100).unwrap() + }; + + static ref NOW: SystemTime = SystemTime::now(); +} + +struct Insert(MempoolTransaction, /* force */ bool); +impl fmt::Debug for Insert { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Insert(ty={:?},arrived_at={:?},tx_hash={:?},contract_address={:?},nonce={:?},force={:?})", + self.0.tx.tx_type(), + self.0.arrived_at, + self.0.tx_hash(), + self.0.contract_address(), + self.0.nonce(), + self.1, + ) + } +} +impl Arbitrary for Insert { + type Parameters = (); + type Strategy = BoxedStrategy<Self>; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + #[derive(Debug, Arbitrary)] + enum TxTy { + Declare, + DeployAccount, + InvokeFunction, + L1Handler, + } + + <(TxTy, u8, u8, u8, bool)>::arbitrary() + .prop_map(|(ty, arrived_at, contract_address, nonce, force)| { + let arrived_at = *NOW + Duration::from_millis(arrived_at.into()); + let contract_addr = ContractAddress::try_from(Felt::from(contract_address)).unwrap(); + let nonce = Nonce(Felt::from(nonce)); + + let resource_bounds = ResourceBoundsMapping( + [ + (Resource::L1Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), + (Resource::L2Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), + ] + .into(), + ); + + let tx = match ty { + TxTy::Declare => starknet_api::transaction::Transaction::Declare( + starknet_api::transaction::DeclareTransaction::V3(DeclareTransactionV3 { + resource_bounds, + tip: Default::default(), + signature: Default::default(), + nonce, + class_hash: Default::default(), + compiled_class_hash: Default::default(), + sender_address: contract_addr, + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: Default::default(), + account_deployment_data: Default::default(), + }), + ), + TxTy::DeployAccount => starknet_api::transaction::Transaction::DeployAccount( + starknet_api::transaction::DeployAccountTransaction::V3(DeployAccountTransactionV3 { + resource_bounds, + tip: Default::default(), + signature: Default::default(), + nonce, + class_hash: Default::default(), + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: Default::default(), + contract_address_salt: ContractAddressSalt(contract_addr.to_felt()), + constructor_calldata: Default::default(), + }), + ), + TxTy::InvokeFunction => starknet_api::transaction::Transaction::Invoke( + starknet_api::transaction::InvokeTransaction::V3(InvokeTransactionV3 { + resource_bounds, + tip: Default::default(), + signature: Default::default(), + nonce, + sender_address: contract_addr, + calldata: Default::default(), + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: Default::default(), + account_deployment_data: Default::default(), + }), + ), + // TODO: maybe update the values?
+ TxTy::L1Handler => starknet_api::transaction::Transaction::L1Handler( + starknet_api::transaction::L1HandlerTransaction { + version: TransactionVersion::ZERO, + nonce, + contract_address: contract_addr, + entry_point_selector: selector_from_name("l1_handler_set_value"), + calldata: Default::default(), + }, + ), + }; + + let deployed = if let starknet_api::transaction::Transaction::DeployAccount(tx) = &tx { + Some( + calculate_contract_address( + tx.contract_address_salt(), + Default::default(), + &Default::default(), + Default::default(), + ) + .unwrap(), + ) + } else { + None + }; + + // providing dummy l1 gas for now + let l1_gas_paid = match &tx { + starknet_api::transaction::Transaction::L1Handler(_) => Some(Fee(1)), + _ => None, + }; + + let tx_hash = tx.calculate_transaction_hash(&ChainId::Mainnet, &TransactionVersion::THREE).unwrap(); + + let tx = Transaction::from_api(tx, tx_hash, Some(DUMMY_CLASS.clone()), l1_gas_paid, deployed, false) + .unwrap(); + + Insert(MempoolTransaction { tx, arrived_at, converted_class: None }, force) + }) + .boxed() + } +} + +#[derive(Debug, Arbitrary)] +enum Operation { + Insert(Insert), + Pop, +} + +#[derive(Debug, Arbitrary)] +struct MempoolInvariantsProblem(Vec<Operation>); +impl MempoolInvariantsProblem { + fn check(&self) { + tracing::debug!("\n\n\n\n\nCase: {:#?}", self); + let mut mempool = MempoolInner::new(MempoolLimits::for_testing()); + mempool.check_invariants(); + + let mut inserted = HashSet::new(); + let mut inserted_contract_nonce_pairs = HashSet::new(); + let mut new_contracts = HashSet::new(); + + let handle_pop = |res: Option<MempoolTransaction>, + inserted: &mut HashSet<TransactionHash>, + inserted_contract_nonce_pairs: &mut HashSet<(Nonce, ContractAddress)>, + new_contracts: &mut HashSet<ContractAddress>| { + if let Some(res) = &res { + let removed = inserted.remove(&res.tx_hash()); + assert!(removed); + let removed = inserted_contract_nonce_pairs.remove(&(res.nonce(), res.contract_address())); + assert!(removed); + + if res.tx.tx_type() == TransactionType::DeployAccount { + let _removed = new_contracts.remove(&res.contract_address()); + // there can be multiple deploy_account txs.
+ // assert!(removed) + } + } else { + assert!(inserted.is_empty()) + } + tracing::trace!("Popped {:?}", res.map(|el| Insert(el, false))); + }; + + for op in &self.0 { + match op { + Operation::Insert(insert) => { + let force = insert.1; + tracing::trace!("Insert {:?}", insert); + let res = mempool.insert_tx(insert.0.clone(), insert.1); + + let expected = if !force + && inserted_contract_nonce_pairs.contains(&(insert.0.nonce(), insert.0.contract_address())) + { + if inserted.contains(&insert.0.tx_hash()) { + Err(TxInsersionError::DuplicateTxn) + } else { + Err(TxInsersionError::NonceConflict) + } + } else { + Ok(()) + }; + + assert_eq!(expected, res); + + if expected.is_ok() { + if insert.0.tx.tx_type() == TransactionType::DeployAccount { + new_contracts.insert(insert.0.contract_address()); + } + inserted.insert(insert.0.tx_hash()); + inserted_contract_nonce_pairs.insert((insert.0.nonce(), insert.0.contract_address())); + } + + tracing::trace!("Result {:?}", res); + } + Operation::Pop => { + tracing::trace!("Pop"); + let res = mempool.pop_next(); + handle_pop(res, &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); + } + } + tracing::trace!("State: {mempool:#?}"); + mempool.check_invariants(); + } + + loop { + tracing::trace!("Pop"); + let Some(res) = mempool.pop_next() else { break }; + handle_pop(Some(res), &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); + tracing::trace!("State: {mempool:#?}"); + mempool.check_invariants(); + } + assert!(inserted.is_empty()); + assert!(inserted_contract_nonce_pairs.is_empty()); + assert!(new_contracts.is_empty()); + tracing::trace!("Done :)"); + } +} + +::proptest::proptest! { + #[tracing_test::traced_test] + #[test] + fn proptest_mempool(pb in any::<MempoolInvariantsProblem>()) { + pb.check(); + } +}
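The test above is model-based: random operation sequences are replayed against the mempool while shadow sets track what should be inside, and the invariants are re-checked after every step. A stripped-down example of the same shape on a toy structure, assuming only the `proptest` crate (already a dev-dependency here); all names are illustrative:

use proptest::prelude::*;
use std::collections::BTreeSet;

#[derive(Debug, Clone)]
enum Op {
    Insert(u8),
    PopMin,
}

fn op() -> impl Strategy<Value = Op> {
    prop_oneof![any::<u8>().prop_map(Op::Insert), Just(Op::PopMin)]
}

proptest! {
    // Replay a random operation sequence and check an ordering property afterward,
    // the same generate/replay/check shape as MempoolInvariantsProblem::check.
    #[test]
    fn pops_are_sorted_between_inserts(ops in proptest::collection::vec(op(), 0..64)) {
        let mut set = BTreeSet::new();
        let mut last_popped: Option<u8> = None;
        for op in ops {
            match op {
                // A later insert may sort below past pops, so reset the watermark.
                Op::Insert(v) => { set.insert(v); last_popped = None; }
                Op::PopMin => if let Some(v) = set.pop_first() {
                    if let Some(prev) = last_popped { prop_assert!(prev <= v); }
                    last_popped = Some(v);
                },
            }
        }
    }
}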
diff --git a/crates/client/mempool/src/inner/tx.rs b/crates/client/mempool/src/inner/tx.rs new file mode 100644 index 000000000..0106a67df --- /dev/null +++ b/crates/client/mempool/src/inner/tx.rs @@ -0,0 +1,55 @@ +use crate::{clone_transaction, contract_addr, nonce, tx_hash}; +use blockifier::transaction::transaction_execution::Transaction; +use mc_exec::execution::TxInfo; +use mp_class::ConvertedClass; +use mp_convert::FeltHexDisplay; +use starknet_api::{ + core::{ContractAddress, Nonce}, + transaction::TransactionHash, +}; +use std::{fmt, time::SystemTime}; + +pub type ArrivedAtTimestamp = SystemTime; + +pub struct MempoolTransaction { + pub tx: Transaction, + pub arrived_at: ArrivedAtTimestamp, + pub converted_class: Option<ConvertedClass>, +} + +impl fmt::Debug for MempoolTransaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MempoolTransaction") + .field("tx_hash", &self.tx_hash().hex_display()) + .field("nonce", &self.nonce().hex_display()) + .field("contract_address", &self.contract_address().hex_display()) + .field("tx_type", &self.tx.tx_type()) + .field("arrived_at", &self.arrived_at) + .finish() + } +} + +impl Clone for MempoolTransaction { + fn clone(&self) -> Self { + Self { + tx: clone_transaction(&self.tx), + arrived_at: self.arrived_at, + converted_class: self.converted_class.clone(), + } + } +} + +impl MempoolTransaction { + pub fn clone_tx(&self) -> Transaction { + clone_transaction(&self.tx) + } + pub fn nonce(&self) -> Nonce { + nonce(&self.tx) + } + pub fn contract_address(&self) -> ContractAddress { + contract_addr(&self.tx) + } + pub fn tx_hash(&self) -> TransactionHash { + tx_hash(&self.tx) + } +} diff --git a/crates/client/mempool/src/l1.rs b/crates/client/mempool/src/l1.rs index 3e7c86211..b3bd98c6e 100644 --- a/crates/client/mempool/src/l1.rs +++ b/crates/client/mempool/src/l1.rs @@ -1,3 +1,4 @@ +//! TODO: this should be in the backend use mp_block::header::{GasPrices, L1DataAvailabilityMode}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; diff --git a/crates/client/mempool/src/lib.rs b/crates/client/mempool/src/lib.rs index c8111f9c8..d5bddc425 100644 --- a/crates/client/mempool/src/lib.rs +++ b/crates/client/mempool/src/lib.rs @@ -1,11 +1,11 @@ +use anyhow::Context; use blockifier::blockifier::stateful_validator::StatefulValidatorError; use blockifier::transaction::account_transaction::AccountTransaction; use blockifier::transaction::transaction_execution::Transaction; use blockifier::transaction::transactions::{ - DeclareTransaction, DeployAccountTransaction, InvokeTransaction, L1HandlerTransaction, + DeclareTransaction, DeployAccountTransaction, InvokeTransaction, L1HandlerTransaction as BL1HandlerTransaction, }; use header::make_pending_header; -use inner::MempoolInner; use mc_db::db_block_id::DbBlockId; use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::ExecutionContext; @@ -13,10 +13,11 @@ use metrics::MempoolMetrics; use mp_block::{BlockId, BlockTag, MadaraPendingBlockInfo}; use mp_class::ConvertedClass; use mp_convert::ToFelt; -use mp_transactions::{ - broadcasted_declare_v0_to_blockifier, broadcasted_to_blockifier, BroadcastedDeclareTransactionV0, -}; -use mp_transactions::{BroadcastedToBlockifierError, L1HandlerTransactionResult}; +use mp_transactions::BroadcastedDeclareTransactionV0; +use mp_transactions::BroadcastedToBlockifierError; +use mp_transactions::BroadcastedTransactionExt; +use mp_transactions::L1HandlerTransaction; +use mp_transactions::L1HandlerTransactionResult; use starknet_api::core::{ContractAddress, Nonce}; use starknet_api::transaction::TransactionHash; use starknet_types_core::felt::Felt; @@ -25,20 +26,21 @@ use starknet_types_rpc::{ BroadcastedTxn, ClassAndTxnHash, ContractAndTxnHash, }; use std::sync::{Arc, RwLock}; +use std::time::SystemTime; +use tx::blockifier_to_saved_tx; +use tx::saved_to_blockifier_tx; -pub use inner::TxInsersionError; -pub use inner::{ArrivedAtTimestamp, MempoolTransaction}; #[cfg(any(test, feature = "testing"))] pub use l1::MockL1DataProvider; pub use l1::{GasPriceProvider, L1DataProvider}; -pub mod block_production; -pub mod block_production_metrics; -mod close_block; pub mod header; mod inner; mod l1; pub mod metrics; +mod tx; + +pub use inner::*; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -68,13 +70,23 @@ pub trait MempoolProvider: Send + Sync { &self, tx: BroadcastedDeployAccountTxn<Felt>, ) -> Result<ContractAndTxnHash<Felt>, Error>; - fn accept_l1_handler_tx(&self, tx: Transaction) -> Result<L1HandlerTransactionResult, Error>; + fn accept_l1_handler_tx( + &self, + tx: L1HandlerTransaction, + paid_fees_on_l1: u128, + ) -> Result<L1HandlerTransactionResult, Error>; fn take_txs_chunk<I: Extend<MempoolTransaction> + 'static>(&self, dest: &mut I, n: usize) where Self: Sized; fn take_tx(&self) -> Option<MempoolTransaction>; - fn re_add_txs<I: IntoIterator<Item = MempoolTransaction> + 'static>(&self, txs: I) - where + fn re_add_txs< + I: IntoIterator<Item = MempoolTransaction> + 'static, + CI: IntoIterator<Item = MempoolTransaction> + 'static, + >( + &self, + txs: I, + consumed_txs: CI, + ) where Self: Sized; fn chain_id(&self) -> Felt; } @@ -87,15 +99,38 @@ pub struct Mempool { } impl Mempool { - pub fn new(backend: Arc<MadaraBackend>, l1_data_provider: Arc<dyn L1DataProvider>) -> Self { - Mempool { backend, l1_data_provider, inner: Default::default(), metrics: MempoolMetrics::register() } + pub fn new(backend: Arc<MadaraBackend>, l1_data_provider: Arc<dyn L1DataProvider>, limits: MempoolLimits) -> Self { + Mempool { + backend, + l1_data_provider, + inner:
RwLock::new(MempoolInner::new(limits)), + metrics: MempoolMetrics::register(), + } } - #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn accept_tx(&self, tx: Transaction, converted_class: Option<ConvertedClass>) -> Result<(), Error> { - // The timestamp *does not* take the transaction validation time into account. - let arrived_at = ArrivedAtTimestamp::now(); + pub fn load_txs_from_db(&mut self) -> Result<(), anyhow::Error> { + for res in self.backend.get_mempool_transactions() { + let (tx_hash, saved_tx, converted_class) = res.context("Getting mempool transactions")?; + let (tx, arrived_at) = saved_to_blockifier_tx(saved_tx, tx_hash, &converted_class) + .context("Converting saved tx to blockifier")?; + + if let Err(err) = self.accept_tx(tx, converted_class, arrived_at) { + match err { + Error::InnerMempool(TxInsersionError::Limit(MempoolLimitReached::Age { .. })) => {} // do nothing + err => tracing::warn!("Could not re-add mempool transaction from db: {err:#}"), + } + } + } + Ok(()) + } + #[tracing::instrument(skip(self), fields(module = "Mempool"))] + fn accept_tx( + &self, + tx: Transaction, + converted_class: Option<ConvertedClass>, + arrived_at: SystemTime, + ) -> Result<(), Error> { // Get pending block. let pending_block_info = if let Some(block) = self.backend.get_block_info(&DbBlockId::Pending)? { block @@ -126,7 +161,8 @@ impl Mempool { None }; - tracing::debug!("Mempool verify tx_hash={:#x}", tx_hash(&tx).to_felt()); + let tx_hash = tx_hash(&tx).to_felt(); + tracing::debug!("Mempool verify tx_hash={:#x}", tx_hash); // Perform validations let exec_context = ExecutionContext::new_in_block(Arc::clone(&self.backend), &pending_block_info)?; @@ -137,19 +173,28 @@ impl Mempool { } if !is_only_query(&tx) { - tracing::debug!("Adding to mempool tx_hash={:#x}", tx_hash(&tx).to_felt()); - // Finally, add it to the nonce chain for the account nonce + tracing::debug!("Adding to inner mempool tx_hash={:#x}", tx_hash); + // Add to db + let saved_tx = blockifier_to_saved_tx(&tx, arrived_at); + self.backend.save_mempool_transaction(&saved_tx, tx_hash, &converted_class)?; + + // Add it to the inner mempool + let force = false; + self.inner .write() .expect("Poisoned lock") - .insert_tx(MempoolTransaction { tx, arrived_at, converted_class }, force)?
- } + .insert_tx(MempoolTransaction { tx, arrived_at, converted_class }, force)?; - self.metrics.accepted_transaction_counter.add(1, &[]); + self.metrics.accepted_transaction_counter.add(1, &[]); + } Ok(()) } + + #[cfg(any(test, feature = "testing"))] + pub fn is_empty(&self) -> bool { + self.inner.read().expect("Poisoned lock").is_empty() + } } pub fn transaction_hash(tx: &Transaction) -> Felt { @@ -180,53 +225,50 @@ fn deployed_contract_address(tx: &Transaction) -> Option<ContractAddress> { impl MempoolProvider for Mempool { #[tracing::instrument(skip(self), fields(module = "Mempool"))] fn accept_invoke_tx(&self, tx: BroadcastedInvokeTxn<Felt>) -> Result<AddInvokeTransactionResult<Felt>, Error> { - let (tx, classes) = broadcasted_to_blockifier( - BroadcastedTxn::Invoke(tx), - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; - - let res = AddInvokeTransactionResult { transaction_hash: transaction_hash(&tx) }; - self.accept_tx(tx, classes)?; + let tx = BroadcastedTxn::Invoke(tx); + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; + + let res = AddInvokeTransactionResult { transaction_hash: transaction_hash(&btx) }; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] fn accept_declare_v0_tx(&self, tx: BroadcastedDeclareTransactionV0) -> Result<ClassAndTxnHash<Felt>, Error> { - let (tx, classes) = broadcasted_declare_v0_to_blockifier( - tx, - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let res = ClassAndTxnHash { - transaction_hash: transaction_hash(&tx), - class_hash: declare_class_hash(&tx).expect("Created transaction should be declare"), + transaction_hash: transaction_hash(&btx), + class_hash: declare_class_hash(&btx).expect("Created transaction should be declare"), }; - - self.accept_tx(tx, classes)?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } - fn accept_l1_handler_tx(&self, tx: Transaction) -> Result<L1HandlerTransactionResult, Error> { - let res = L1HandlerTransactionResult { transaction_hash: transaction_hash(&tx) }; - self.accept_tx(tx, None)?; + #[tracing::instrument(skip(self), fields(module = "Mempool"))] + fn accept_l1_handler_tx( + &self, + tx: L1HandlerTransaction, + paid_fees_on_l1: u128, + ) -> Result<L1HandlerTransactionResult, Error> { + let (btx, class) = + tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version, paid_fees_on_l1)?; + + let res = L1HandlerTransactionResult { transaction_hash: transaction_hash(&btx) }; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] fn accept_declare_tx(&self, tx: BroadcastedDeclareTxn<Felt>) -> Result<ClassAndTxnHash<Felt>, Error> { - let (tx, classes) = broadcasted_to_blockifier( - BroadcastedTxn::Declare(tx), - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; + let tx = BroadcastedTxn::Declare(tx); + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let res = ClassAndTxnHash { - transaction_hash: transaction_hash(&tx), - class_hash: declare_class_hash(&tx).expect("Created transaction should be declare"), + transaction_hash: transaction_hash(&btx), + class_hash: declare_class_hash(&btx).expect("Created transaction should be declare"), }; - self.accept_tx(tx, classes)?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } @@ -235,17 +277,14 @@ impl
MempoolProvider for Mempool { &self, tx: BroadcastedDeployAccountTxn<Felt>, ) -> Result<ContractAndTxnHash<Felt>, Error> { - let (tx, classes) = broadcasted_to_blockifier( - BroadcastedTxn::DeployAccount(tx), - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; + let tx = BroadcastedTxn::DeployAccount(tx); + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let res = ContractAndTxnHash { - transaction_hash: transaction_hash(&tx), - contract_address: deployed_contract_address(&tx).expect("Created transaction should be deploy account"), + transaction_hash: transaction_hash(&btx), + contract_address: deployed_contract_address(&btx).expect("Created transaction should be deploy account"), }; - self.accept_tx(tx, classes)?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } @@ -263,10 +302,16 @@ impl MempoolProvider for Mempool { } /// Warning: A lock is taken while a user-supplied function (iterator stuff) is run - Callers should be careful - #[tracing::instrument(skip(self, txs), fields(module = "Mempool"))] - fn re_add_txs<I: IntoIterator<Item = MempoolTransaction> + 'static>(&self, txs: I) { + /// This is called by block production after a batch of transactions has been executed. + /// Marks the consumed txs as removed, and re-adds the transactions that were not consumed back into the mempool. + #[tracing::instrument(skip(self, txs, consumed_txs), fields(module = "Mempool"))] + fn re_add_txs<I: IntoIterator<Item = MempoolTransaction>, CI: IntoIterator<Item = MempoolTransaction>>( + &self, + txs: I, + consumed_txs: CI, + ) { let mut inner = self.inner.write().expect("Poisoned lock"); - inner.re_add_txs(txs) + inner.re_add_txs(txs, consumed_txs) } fn chain_id(&self) -> Felt { @@ -340,7 +385,7 @@ pub(crate) fn clone_transaction(tx: &Transaction) -> Transaction { only_query: tx.only_query, }), }), - Transaction::L1HandlerTransaction(tx) => Transaction::L1HandlerTransaction(L1HandlerTransaction { + Transaction::L1HandlerTransaction(tx) => Transaction::L1HandlerTransaction(BL1HandlerTransaction { tx: tx.tx.clone(), tx_hash: tx.tx_hash, paid_fee_on_l1: tx.paid_fee_on_l1, @@ -350,11 +395,7 @@ #[cfg(test)] mod test { - use std::sync::Arc; - - use starknet_types_core::felt::Felt; - - use crate::MockL1DataProvider; + use super::*; #[rstest::fixture] fn backend() -> Arc<MadaraBackend> { @@ -411,8 +452,8 @@ l1_data_provider: Arc<MockL1DataProvider>, tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, ) { - let mempool = crate::Mempool::new(backend, l1_data_provider); - let result = mempool.accept_tx(tx_account_v0_valid, None); + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + let result = mempool.accept_tx(tx_account_v0_valid, None, ArrivedAtTimestamp::now()); assert_matches::assert_matches!(result, Ok(())); } @@ -422,8 +463,8 @@ l1_data_provider: Arc<MockL1DataProvider>, tx_account_v1_invalid: blockifier::transaction::transaction_execution::Transaction, ) { - let mempool = crate::Mempool::new(backend, l1_data_provider); - let result = mempool.accept_tx(tx_account_v1_invalid, None); + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); + let result = mempool.accept_tx(tx_account_v1_invalid, None, ArrivedAtTimestamp::now()); assert_matches::assert_matches!(result, Err(crate::Error::Validation(_))); } }
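Putting the new pieces together, the intended construction order is: derive limits from the chain config, build the mempool, restore persisted transactions, and only then share it. A sketch mirroring what crates/node/src/main.rs does further down in this diff; `build_mempool` is a hypothetical helper for illustration, not an API added by this change:

use std::sync::Arc;

use anyhow::Context;
use mc_db::MadaraBackend;
use mc_mempool::{L1DataProvider, Mempool, MempoolLimits};
use mp_chain_config::ChainConfig;

// Restore must happen while we still hold the mempool by value (&mut self),
// i.e. before wrapping it in an Arc and handing it to other services.
fn build_mempool(
    backend: Arc<MadaraBackend>,
    l1_data_provider: Arc<dyn L1DataProvider>,
    chain_config: &ChainConfig,
) -> anyhow::Result<Arc<Mempool>> {
    let mut mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::new(chain_config));
    // Re-inserts saved txs; age-exceeded ones are silently dropped, other failures only warn.
    mempool.load_txs_from_db().context("Loading mempool transactions")?;
    Ok(Arc::new(mempool))
}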
diff --git a/crates/client/mempool/src/tx.rs b/crates/client/mempool/src/tx.rs new file mode 100644 index 000000000..3d7863874 --- /dev/null +++ b/crates/client/mempool/src/tx.rs @@ -0,0 +1,129 @@ +use std::time::{Duration, SystemTime}; + +use blockifier::{ + execution::{contract_class::ClassInfo as BClassInfo, errors::ContractClassError}, + transaction::{ + account_transaction::AccountTransaction, + errors::TransactionExecutionError, + transaction_execution::Transaction as BTransaction, + transactions::{DeclareTransaction, DeployAccountTransaction, InvokeTransaction, L1HandlerTransaction}, + }, +}; +use mc_db::mempool_db::SavedTransaction; +use mp_class::{compile::ClassCompilationError, ConvertedClass}; +use mp_convert::ToFelt; +use starknet_api::{ + core::ContractAddress, + transaction::{Fee, Transaction as StarknetApiTransaction, TransactionHash}, +}; +use starknet_types_core::felt::Felt; + +pub fn blockifier_to_saved_tx(tx: &BTransaction, arrived_at: SystemTime) -> SavedTransaction { + let arrived_at = arrived_at.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default().as_millis(); + match tx { + BTransaction::AccountTransaction(AccountTransaction::Declare(tx)) => SavedTransaction { + only_query: tx.only_query(), + tx: StarknetApiTransaction::Declare(tx.tx.clone()).into(), + paid_fee_on_l1: None, + contract_address: None, + arrived_at, + }, + BTransaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) => SavedTransaction { + only_query: tx.only_query, + tx: StarknetApiTransaction::DeployAccount(tx.tx.clone()).into(), + paid_fee_on_l1: None, + contract_address: Some(tx.contract_address.to_felt()), + arrived_at, + }, + BTransaction::AccountTransaction(AccountTransaction::Invoke(tx)) => SavedTransaction { + only_query: tx.only_query, + tx: StarknetApiTransaction::Invoke(tx.tx.clone()).into(), + paid_fee_on_l1: None, + contract_address: None, + arrived_at, + }, + BTransaction::L1HandlerTransaction(tx) => SavedTransaction { + only_query: false, + tx: StarknetApiTransaction::L1Handler(tx.tx.clone()).into(), + paid_fee_on_l1: Some(*tx.paid_fee_on_l1), + contract_address: None, + arrived_at, + }, + } +} + +#[derive(Debug, thiserror::Error)] +pub enum SavedToBlockifierTxError { + #[error(transparent)] + Blockifier(#[from] TransactionExecutionError), + #[error("Missing field {0}")] + MissingField(&'static str), + #[error("Invalid contract address")] + InvalidContractAddress, + #[error("Deploy not supported")] + DeployNotSupported, + #[error("Converting class {0:#}")] + ClassCompilationError(#[from] ClassCompilationError), + #[error("Converting class {0:#}")] + ContractClassError(#[from] ContractClassError), +} + +pub fn saved_to_blockifier_tx( + saved_tx: SavedTransaction, + tx_hash: Felt, + converted_class: &Option<ConvertedClass>, +) -> Result<(BTransaction, SystemTime), SavedToBlockifierTxError> { + let tx_hash = TransactionHash(tx_hash); + let arrived_at = SystemTime::UNIX_EPOCH + Duration::from_millis(saved_tx.arrived_at as u64); + let tx = match saved_tx.tx { + mp_transactions::Transaction::L1Handler(tx) => BTransaction::L1HandlerTransaction(L1HandlerTransaction { + tx: tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + tx_hash, + paid_fee_on_l1: Fee(saved_tx + .paid_fee_on_l1 + .ok_or(SavedToBlockifierTxError::MissingField("paid_fee_on_l1"))?), + }), + mp_transactions::Transaction::Declare(tx) => { + let converted_class = + converted_class.as_ref().ok_or(SavedToBlockifierTxError::MissingField("class_info"))?; + + let class_info = match converted_class { + ConvertedClass::Legacy(class) => { + BClassInfo::new(&class.info.contract_class.to_blockifier_class()?, 0, 0)?
+ } + ConvertedClass::Sierra(class) => BClassInfo::new( + &class.compiled.to_blockifier_class()?, + class.info.contract_class.sierra_program.len(), + class.info.contract_class.abi.len(), + )?, + }; + let tx = tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?; + let declare_tx = match saved_tx.only_query { + true => DeclareTransaction::new_for_query(tx, tx_hash, class_info)?, + false => DeclareTransaction::new(tx, tx_hash, class_info)?, + }; + BTransaction::AccountTransaction(AccountTransaction::Declare(declare_tx)) + } + mp_transactions::Transaction::DeployAccount(tx) => { + BTransaction::AccountTransaction(AccountTransaction::DeployAccount(DeployAccountTransaction { + tx: tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + tx_hash, + contract_address: ContractAddress::try_from( + saved_tx.contract_address.ok_or(SavedToBlockifierTxError::MissingField("contract_address"))?, + ) + .map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + only_query: saved_tx.only_query, + })) + } + mp_transactions::Transaction::Invoke(tx) => { + BTransaction::AccountTransaction(AccountTransaction::Invoke(InvokeTransaction { + tx: tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + tx_hash, + only_query: saved_tx.only_query, + })) + } + mp_transactions::Transaction::Deploy(_) => return Err(SavedToBlockifierTxError::DeployNotSupported), + }; + + Ok((tx, arrived_at)) +}
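One consequence of the encoding above worth noting: `arrived_at` is persisted as whole milliseconds since the Unix epoch, so a save/restore cycle truncates sub-millisecond precision. A standard-library-only round trip of just that field, mirroring the two conversions above:

use std::time::{Duration, SystemTime};

fn main() {
    let arrived_at = SystemTime::UNIX_EPOCH + Duration::new(1_700_000_000, 123_456_789);
    // Save side, as in blockifier_to_saved_tx: SystemTime -> u128 milliseconds.
    let saved_millis = arrived_at.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default().as_millis();
    // Restore side, as in saved_to_blockifier_tx: u128 milliseconds -> SystemTime.
    let restored = SystemTime::UNIX_EPOCH + Duration::from_millis(saved_millis as u64);
    // The 456_789 ns below the millisecond are gone after the round trip.
    assert_eq!(restored, SystemTime::UNIX_EPOCH + Duration::new(1_700_000_000, 123_000_000));
}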
diff --git a/crates/client/rpc/src/errors.rs b/crates/client/rpc/src/errors.rs index 07782bc07..9319309fd 100644 --- a/crates/client/rpc/src/errors.rs +++ b/crates/client/rpc/src/errors.rs @@ -5,6 +5,7 @@ use serde::Serialize; use serde_json::json; use starknet_api::StarknetApiError; use starknet_types_core::felt::Felt; +use std::borrow::Cow; use std::fmt::Display; pub type StarknetRpcResult<T> = Result<T, StarknetRpcApiError>; @@ -39,7 +40,7 @@ pub enum StorageProofTrie { #[derive(thiserror::Error, Debug)] pub enum StarknetRpcApiError { #[error("Failed to write transaction")] - FailedToReceiveTxn, + FailedToReceiveTxn { err: Option<Cow<'static, str>> }, #[error("Contract not found")] ContractNotFound, #[error("Block not found")] BlockNotFound, @@ -79,7 +80,7 @@ pub enum StarknetRpcApiError { #[error("Account balance is smaller than the transaction's max_fee")] InsufficientAccountBalance, #[error("Account validation failed")] - ValidationFailure { error: String }, + ValidationFailure { error: Cow<'static, str> }, #[error("Compilation failed")] CompilationFailed, #[error("Contract class size is too large")] @@ -109,7 +110,7 @@ impl From<&StarknetRpcApiError> for i32 { fn from(err: &StarknetRpcApiError) -> Self { match err { - StarknetRpcApiError::FailedToReceiveTxn => 1, + StarknetRpcApiError::FailedToReceiveTxn { .. } => 1, StarknetRpcApiError::ContractNotFound => 20, StarknetRpcApiError::BlockNotFound => 24, StarknetRpcApiError::InvalidTxnHash => 25, @@ -151,6 +152,7 @@ impl StarknetRpcApiError { match self { StarknetRpcApiError::ErrUnexpectedError { data } => Some(json!(data)), StarknetRpcApiError::ValidationFailure { error } => Some(json!(error)), + StarknetRpcApiError::FailedToReceiveTxn { err } => err.as_ref().map(|err| json!(err)), StarknetRpcApiError::TxnExecutionError { tx_index, error } => Some(json!({ "transaction_index": tx_index, "execution_error": error, @@ -191,9 +193,10 @@ impl From<StarknetError> for StarknetRpcApiError { fn from(err: StarknetError) -> Self { match err.code { StarknetErrorCode::BlockNotFound => StarknetRpcApiError::BlockNotFound, - StarknetErrorCode::TransactionFailed | StarknetErrorCode::ValidateFailure => { - StarknetRpcApiError::ValidationFailure { error: err.message } + StarknetErrorCode::TransactionFailed => { + StarknetRpcApiError::FailedToReceiveTxn { err: Some(err.message.into()) } } + StarknetErrorCode::ValidateFailure => StarknetRpcApiError::ValidationFailure { error: err.message.into() }, StarknetErrorCode::UninitializedContract => StarknetRpcApiError::ContractNotFound, StarknetErrorCode::UndeclaredClass => StarknetRpcApiError::ClassHashNotFound, StarknetErrorCode::InvalidTransactionNonce => StarknetRpcApiError::InvalidTxnNonce, diff --git a/crates/client/rpc/src/providers/mempool.rs b/crates/client/rpc/src/providers/mempool.rs index 01253775b..9ce43a410 100644 --- a/crates/client/rpc/src/providers/mempool.rs +++ b/crates/client/rpc/src/providers/mempool.rs @@ -28,9 +28,16 @@ impl From<mc_mempool::Error> for StarknetRpcApiError { mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::DuplicateTxn) => { StarknetRpcApiError::DuplicateTxn } - mc_mempool::Error::Validation(err) => StarknetRpcApiError::ValidationFailure { error: format!("{err:#}") }, - mc_mempool::Error::InnerMempool(err) => { - StarknetRpcApiError::ValidationFailure { error: format!("{err:#}") } + mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::Limit(limit)) => { + StarknetRpcApiError::FailedToReceiveTxn { err: Some(format!("{}", limit).into()) } + } + mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::NonceConflict) => { + StarknetRpcApiError::FailedToReceiveTxn { + err: Some("A transaction with this nonce and sender address already exists".into()), + } + } + mc_mempool::Error::Validation(err) => { + StarknetRpcApiError::ValidationFailure { error: format!("{err:#}").into() } } mc_mempool::Error::Exec(err) => { StarknetRpcApiError::TxnExecutionError { tx_index: 0, error: format!("{err:#}") } diff --git a/crates/client/rpc/src/test_utils.rs b/crates/client/rpc/src/test_utils.rs index c0cd7e2ee..7f4d32a0e 100644 --- a/crates/client/rpc/src/test_utils.rs +++ b/crates/client/rpc/src/test_utils.rs @@ -243,6 +243,8 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); @@ -266,6 +268,8 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); @@ -333,6 +337,8 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); @@ -369,6 +375,8 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); } @@ -550,6 +558,8 @@ pub fn
make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[0].clone(), vec![], + None, + None, ) .unwrap(); @@ -572,6 +582,8 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[1].clone(), vec![], + None, + None, ) .unwrap(); @@ -594,6 +606,8 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[2].clone(), vec![], + None, + None, ) .unwrap(); @@ -613,6 +627,8 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[3].clone(), vec![], + None, + None, ) .unwrap(); } diff --git a/crates/client/rpc/src/utils/mod.rs b/crates/client/rpc/src/utils/mod.rs index 8e337869d..6326dd7d3 100644 --- a/crates/client/rpc/src/utils/mod.rs +++ b/crates/client/rpc/src/utils/mod.rs @@ -3,7 +3,7 @@ pub(crate) mod transaction; use std::fmt; use crate::StarknetRpcApiError; -pub use transaction::to_blockifier_transactions; +pub use transaction::to_blockifier_transaction; pub fn display_internal_server_error(err: impl fmt::Display) { tracing::error!(target: "rpc_errors", "{:#}", err); diff --git a/crates/client/rpc/src/utils/transaction.rs b/crates/client/rpc/src/utils/transaction.rs index ef4e5ef97..bbc7c114f 100644 --- a/crates/client/rpc/src/utils/transaction.rs +++ b/crates/client/rpc/src/utils/transaction.rs @@ -13,7 +13,7 @@ use crate::errors::{StarknetRpcApiError, StarknetRpcResult}; /// /// **note:** this function does not support deploy transaction /// because it is not supported by blockifier -pub fn to_blockifier_transactions( +pub fn to_blockifier_transaction( backend: Arc<MadaraBackend>, block_id: BlockId, transaction: mp_transactions::Transaction, diff --git a/crates/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs b/crates/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs index e2abf6207..43c173051 100644 --- a/crates/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs +++ b/crates/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs @@ -53,6 +53,8 @@ mod tests { }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); @@ -70,6 +72,8 @@ mod tests { }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); @@ -90,6 +94,8 @@ mod tests { }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); @@ -117,6 +123,8 @@ mod tests { }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); diff --git a/crates/client/rpc/src/versions/user/v0_7_1/methods/read/estimate_fee.rs b/crates/client/rpc/src/versions/user/v0_7_1/methods/read/estimate_fee.rs index 52db03551..e399ee8ad 100644 --- a/crates/client/rpc/src/versions/user/v0_7_1/methods/read/estimate_fee.rs +++ b/crates/client/rpc/src/versions/user/v0_7_1/methods/read/estimate_fee.rs @@ -1,17 +1,14 @@ -use std::sync::Arc; - -use starknet_types_core::felt::Felt; -use starknet_types_rpc::{BroadcastedTxn, FeeEstimate, SimulationFlagForEstimateFee}; - -use mc_exec::ExecutionContext; -use mp_block::BlockId; -use mp_transactions::broadcasted_to_blockifier; - use crate::errors::StarknetRpcApiError; use crate::errors::StarknetRpcResult; use crate::utils::ResultExt; use crate::versions::user::v0_7_1::methods::trace::trace_transaction::FALLBACK_TO_SEQUENCER_WHEN_VERSION_BELOW; use crate::Starknet; +use mc_exec::ExecutionContext; +use mp_block::BlockId; +use mp_transactions::BroadcastedTransactionExt; +use starknet_types_core::felt::Felt; +use starknet_types_rpc::{BroadcastedTxn, FeeEstimate,
SimulationFlagForEstimateFee}; +use std::sync::Arc; /// Estimate the fee associated with transaction /// @@ -40,7 +37,7 @@ pub async fn estimate_fee( let transactions = request .into_iter() - .map(|tx| broadcasted_to_blockifier(tx, starknet.chain_id(), starknet_version).map(|(tx, _)| tx)) + .map(|tx| tx.into_blockifier(starknet.chain_id(), starknet_version).map(|(tx, _)| tx)) .collect::<Result<Vec<_>, _>>() .or_internal_server_error("Failed to convert BroadcastedTransaction to AccountTransaction")?; diff --git a/crates/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs b/crates/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs index 847816a8f..6cd0a0027 100644 --- a/crates/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs +++ b/crates/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs @@ -248,6 +248,8 @@ mod tests { }, StateDiff::default(), vec![], + None, + None, ) .unwrap(); diff --git a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/mod.rs b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/mod.rs index 8fadb0da4..9453985a8 100644 --- a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/mod.rs +++ b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/mod.rs @@ -1,7 +1,4 @@ -pub(crate) mod simulate_transactions; -pub mod trace_block_transactions; -pub(crate) mod trace_transaction; - +use crate::{versions::user::v0_7_1::StarknetTraceRpcApiV0_7_1Server, Starknet}; use jsonrpsee::core::{async_trait, RpcResult}; use mp_block::BlockId; use simulate_transactions::simulate_transactions; @@ -10,7 +7,9 @@ use starknet_types_rpc::{BroadcastedTxn, SimulateTransactionsResult, SimulationF use trace_block_transactions::trace_block_transactions; use trace_transaction::trace_transaction; -use crate::{versions::user::v0_7_1::StarknetTraceRpcApiV0_7_1Server, Starknet}; +pub(crate) mod simulate_transactions; +pub mod trace_block_transactions; +pub(crate) mod trace_transaction; #[async_trait] impl StarknetTraceRpcApiV0_7_1Server for Starknet { diff --git a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/simulate_transactions.rs b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/simulate_transactions.rs index 317541aa1..3bd55fa66 100644 --- a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/simulate_transactions.rs +++ b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/simulate_transactions.rs @@ -1,15 +1,14 @@ +use super::trace_transaction::FALLBACK_TO_SEQUENCER_WHEN_VERSION_BELOW; +use crate::errors::{StarknetRpcApiError, StarknetRpcResult}; +use crate::utils::ResultExt; +use crate::Starknet; use mc_exec::{execution_result_to_tx_trace, ExecutionContext}; use mp_block::BlockId; -use mp_transactions::broadcasted_to_blockifier; +use mp_transactions::BroadcastedTransactionExt; use starknet_types_core::felt::Felt; use starknet_types_rpc::{BroadcastedTxn, SimulateTransactionsResult, SimulationFlag}; use std::sync::Arc; -use super::trace_transaction::FALLBACK_TO_SEQUENCER_WHEN_VERSION_BELOW; -use crate::errors::{StarknetRpcApiError, StarknetRpcResult}; -use crate::utils::ResultExt; -use crate::Starknet; - pub async fn simulate_transactions( starknet: &Starknet, block_id: BlockId, @@ -29,7 +28,7 @@ pub async fn simulate_transactions( let user_transactions = transactions .into_iter() - .map(|tx| broadcasted_to_blockifier(tx, starknet.chain_id(), starknet_version).map(|(tx, _)| tx)) + .map(|tx| tx.into_blockifier(starknet.chain_id(), starknet_version).map(|(tx, _)| tx))
.collect::<Result<Vec<_>, _>>() .or_internal_server_error("Failed to convert broadcasted transaction to blockifier")?; diff --git a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_block_transactions.rs b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_block_transactions.rs index 0ddfe57e9..701834ae7 100644 --- a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_block_transactions.rs +++ b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_block_transactions.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use super::trace_transaction::FALLBACK_TO_SEQUENCER_WHEN_VERSION_BELOW; use crate::errors::{StarknetRpcApiError, StarknetRpcResult}; -use crate::utils::transaction::to_blockifier_transactions; +use crate::utils::transaction::to_blockifier_transaction; use crate::utils::ResultExt; use crate::Starknet; @@ -30,7 +30,7 @@ pub async fn trace_block_transactions( .into_iter() .zip(block.info.tx_hashes()) .map(|(tx, hash)| { - to_blockifier_transactions(starknet.clone_backend(), block_id.clone(), tx, &TransactionHash(*hash)) + to_blockifier_transaction(starknet.clone_backend(), block_id.clone(), tx, &TransactionHash(*hash)) }) .collect::<Result<_, _>>()?; diff --git a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_transaction.rs b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_transaction.rs index d82ac500c..1dfd170d0 100644 --- a/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_transaction.rs +++ b/crates/client/rpc/src/versions/user/v0_7_1/methods/trace/trace_transaction.rs @@ -1,6 +1,6 @@ use crate::errors::StarknetRpcApiError; use crate::errors::StarknetRpcResult; -use crate::utils::transaction::to_blockifier_transactions; +use crate::utils::transaction::to_blockifier_transaction; use crate::utils::{OptionExt, ResultExt}; use crate::Starknet; use mc_exec::execution_result_to_tx_trace; @@ -32,7 +32,7 @@ pub async fn trace_transaction( let mut block_txs = Iterator::zip(block.inner.transactions.into_iter(), block.info.tx_hashes()).map(|(tx, hash)| { - to_blockifier_transactions(starknet.clone_backend(), block.info.as_block_id(), tx, &TransactionHash(*hash)) + to_blockifier_transaction(starknet.clone_backend(), block.info.as_block_id(), tx, &TransactionHash(*hash)) }); // takes up until not including last tx diff --git a/crates/client/rpc/src/versions/user/v0_8_0/methods/ws/lib.rs b/crates/client/rpc/src/versions/user/v0_8_0/methods/ws/lib.rs index 15c3771bd..4c79cc3c4 100644 --- a/crates/client/rpc/src/versions/user/v0_8_0/methods/ws/lib.rs +++ b/crates/client/rpc/src/versions/user/v0_8_0/methods/ws/lib.rs @@ -169,6 +169,8 @@ mod test { }, mp_state_update::StateDiff::default(), vec![], + None, + None, ) .expect("Storing block"); diff --git a/crates/client/sync/src/fetch/fetchers.rs b/crates/client/sync/src/fetch/fetchers.rs index 36e3f6e7f..fc0f559e8 100644 --- a/crates/client/sync/src/fetch/fetchers.rs +++ b/crates/client/sync/src/fetch/fetchers.rs @@ -273,6 +273,7 @@ fn convert_sequencer_block_pending( .collect(), transactions: block.transactions.into_iter().map(Into::into).collect(), declared_classes: class_update.into_iter().map(Into::into).collect(), + ..Default::default() }) } diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 3821b5181..70b9a3042 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -23,6 +23,7 @@ name = "madara" # Madara mc-analytics = { workspace = true } mc-block-import = { workspace = true } +mc-block-production = { workspace = true } mc-db = { workspace = true } mc-devnet =
{ workspace = true } mc-eth = { workspace = true } diff --git a/crates/node/src/cli/chain_config_overrides.rs b/crates/node/src/cli/chain_config_overrides.rs index a1b6ac40d..6e1f3fdd9 100644 --- a/crates/node/src/cli/chain_config_overrides.rs +++ b/crates/node/src/cli/chain_config_overrides.rs @@ -49,6 +49,10 @@ pub struct ChainConfigOverridesInner { #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + pub mempool_tx_limit: usize, + pub mempool_declare_tx_limit: usize, + #[serde(deserialize_with = "deserialize_duration", serialize_with = "serialize_duration")] + pub mempool_tx_max_age: Duration, } impl ChainConfigOverrideParams { @@ -69,6 +73,9 @@ impl ChainConfigOverrideParams { eth_core_contract_address: chain_config.eth_core_contract_address, eth_gps_statement_verifier: chain_config.eth_gps_statement_verifier, private_key: chain_config.private_key, + mempool_tx_limit: chain_config.mempool_tx_limit, + mempool_declare_tx_limit: chain_config.mempool_declare_tx_limit, + mempool_tx_max_age: chain_config.mempool_tx_max_age, feeder_gateway_url: chain_config.feeder_gateway_url, gateway_url: chain_config.gateway_url, }) @@ -120,6 +127,9 @@ impl ChainConfigOverrideParams { versioned_constants, eth_gps_statement_verifier: chain_config_overrides.eth_gps_statement_verifier, private_key: chain_config_overrides.private_key, + mempool_tx_limit: chain_config_overrides.mempool_tx_limit, + mempool_declare_tx_limit: chain_config_overrides.mempool_declare_tx_limit, + mempool_tx_max_age: chain_config_overrides.mempool_tx_max_age, }) } } diff --git a/crates/node/src/cli/mod.rs b/crates/node/src/cli/mod.rs index 8ebf30ecd..8cbf58bdf 100644 --- a/crates/node/src/cli/mod.rs +++ b/crates/node/src/cli/mod.rs @@ -9,6 +9,7 @@ pub mod sync; pub mod telemetry; use crate::cli::l1::L1SyncParams; use analytics::AnalyticsParams; +use anyhow::Context; pub use block_production::*; pub use chain_config_overrides::*; pub use db::*; @@ -247,10 +248,8 @@ impl RunCmd { // Read from the preset if provided (Some(preset), _, _) => ChainConfig::from(preset), // Read the config path if provided - (_, Some(path), _) => ChainConfig::from_yaml(path).map_err(|err| { - tracing::error!("Failed to load config from YAML at path '{}': {}", path.display(), err); - anyhow::anyhow!("Failed to load chain config from file") - })?, + (_, Some(path), _) => ChainConfig::from_yaml(path) + .with_context(|| format!("Failed to load config from YAML at path '{}'", path.display()))?, // Devnet default preset is Devnet if not provided by CLI (_, _, true) => ChainConfig::from(&ChainPreset::Devnet), _ => { diff --git a/crates/node/src/main.rs b/crates/node/src/main.rs index 363bb0a81..453ab5a23 100644 --- a/crates/node/src/main.rs +++ b/crates/node/src/main.rs @@ -5,8 +5,6 @@ mod cli; mod service; mod util; -use std::sync::Arc; - use anyhow::Context; use clap::Parser; use cli::{NetworkType, RunCmd}; @@ -15,11 +13,12 @@ use mc_analytics::Analytics; use mc_block_import::BlockImporter; use mc_db::{DatabaseService, TrieLogConfig}; use mc_gateway_client::GatewayProvider; -use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool}; +use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; use mc_rpc::providers::{AddTransactionProvider, ForwardToProvider, MempoolAddTxProvider}; use mc_telemetry::{SysInfo, TelemetryService}; use mp_utils::service::{Service, ServiceGroup}; use service::{BlockProductionService, GatewayService, L1SyncService, L2SyncService, RpcService}; +use 
diff --git a/crates/node/src/main.rs b/crates/node/src/main.rs index 363bb0a81..453ab5a23 100644 --- a/crates/node/src/main.rs +++ b/crates/node/src/main.rs @@ -5,8 +5,6 @@ mod cli; mod service; mod util; -use std::sync::Arc; - use anyhow::Context; use clap::Parser; use cli::{NetworkType, RunCmd}; @@ -15,11 +13,12 @@ use mc_analytics::Analytics; use mc_block_import::BlockImporter; use mc_db::{DatabaseService, TrieLogConfig}; use mc_gateway_client::GatewayProvider; -use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool}; +use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; use mc_rpc::providers::{AddTransactionProvider, ForwardToProvider, MempoolAddTxProvider}; use mc_telemetry::{SysInfo, TelemetryService}; use mp_utils::service::{Service, ServiceGroup}; use service::{BlockProductionService, GatewayService, L1SyncService, L2SyncService, RpcService}; +use std::sync::Arc; const GREET_IMPL_NAME: &str = "Madara"; const GREET_SUPPORT_URL: &str = "https://github.com/madara-alliance/madara/issues"; @@ -110,7 +109,13 @@ async fn main() -> anyhow::Result<()> { let l1_data_provider: Arc<dyn L1DataProvider> = Arc::new(l1_gas_setter.clone()); // declare mempool here so that it can be used to process l1->l2 messages in the l1 service - let mempool = Arc::new(Mempool::new(Arc::clone(db_service.backend()), Arc::clone(&l1_data_provider))); + let mut mempool = Mempool::new( + Arc::clone(db_service.backend()), + Arc::clone(&l1_data_provider), + MempoolLimits::new(&chain_config), + ); + mempool.load_txs_from_db().context("Loading mempool transactions")?; + let mempool = Arc::new(mempool); let l1_service = L1SyncService::new( &run_cmd.l1_sync_params, diff --git a/crates/node/src/service/block_production.rs b/crates/node/src/service/block_production.rs index 39b397d68..e8c759b40 100644 --- a/crates/node/src/service/block_production.rs +++ b/crates/node/src/service/block_production.rs @@ -1,18 +1,15 @@ -use std::{io::Write, sync::Arc}; - +use crate::cli::block_production::BlockProductionParams; use anyhow::Context; use mc_block_import::{BlockImporter, BlockValidationContext}; +use mc_block_production::{metrics::BlockProductionMetrics, BlockProductionTask}; use mc_db::{DatabaseService, MadaraBackend}; use mc_devnet::{ChainGenesisDescription, DevnetKeys}; -use mc_mempool::{ - block_production::BlockProductionTask, block_production_metrics::BlockProductionMetrics, L1DataProvider, Mempool, -}; +use mc_mempool::{L1DataProvider, Mempool}; use mc_telemetry::TelemetryHandle; use mp_utils::service::{MadaraService, Service, ServiceContext}; +use std::{io::Write, sync::Arc}; use tokio::task::JoinSet; -use crate::cli::block_production::BlockProductionParams; - struct StartParams { backend: Arc<MadaraBackend>, block_import: Arc<BlockImporter>, diff --git a/crates/primitives/block/src/lib.rs b/crates/primitives/block/src/lib.rs index 60dc23ea1..3f97be8ee 100644 --- a/crates/primitives/block/src/lib.rs +++ b/crates/primitives/block/src/lib.rs @@ -278,6 +278,17 @@ impl From for MadaraMaybePendingBlock { } } +/// Visited segments are the class segments visited during execution of the block. +/// This info is an input to SNOS and is used for proving. +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct VisitedSegments(pub Vec<VisitedSegmentEntry>); + +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct VisitedSegmentEntry { + pub class_hash: Felt, + pub segments: Vec<usize>, +} + #[cfg(test)] mod tests { use super::*;
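Note: VisitedSegments is a plain newtype over per-class entries, so producing one as SNOS input is direct. A hedged sketch (values are illustrative, and this assumes `segments` holds numeric offsets, as reconstructed above):

use starknet_types_core::felt::Felt;
// Assumes the `VisitedSegments` / `VisitedSegmentEntry` types from the hunk above.

fn visited_segments_example() -> VisitedSegments {
    VisitedSegments(vec![VisitedSegmentEntry {
        class_hash: Felt::from_hex_unchecked("0x1234"), // illustrative class hash
        segments: vec![0, 42, 97],                      // illustrative segment offsets
    }])
}

The serde derives are what allow the pending block, and with it the visited segments, to be persisted to the DB and restored on restart, e.g. via serde_json::to_string(&visited_segments_example()).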
diff --git a/crates/primitives/chain_config/src/chain_config.rs b/crates/primitives/chain_config/src/chain_config.rs index b23298c74..043649077 100644 --- a/crates/primitives/chain_config/src/chain_config.rs +++ b/crates/primitives/chain_config/src/chain_config.rs @@ -130,6 +130,14 @@ pub struct ChainConfig { #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + + /// Transaction limit in the mempool. + pub mempool_tx_limit: usize, + /// Transaction limit in the mempool; an additional, separate limit applies to declare transactions. + pub mempool_declare_tx_limit: usize, + /// Max age of a transaction in the mempool. + #[serde(deserialize_with = "deserialize_duration")] + pub mempool_tx_max_age: Duration, } impl ChainConfig { @@ -236,6 +244,10 @@ impl ChainConfig { ), private_key: ZeroingPrivateKey::default(), + + mempool_tx_limit: 10_000, + mempool_declare_tx_limit: 20, + mempool_tx_max_age: Duration::from_secs(60 * 60), // 1 hour } }
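Note: main.rs above builds the mempool with MempoolLimits::new(&chain_config), but the limits type itself is not part of this diff. A plausible shape inferred from the three ChainConfig fields just added (the field names here are assumptions, not mc-mempool's actual API):

use std::time::Duration;

// Hypothetical sketch mirroring the three config fields; the real struct in
// mc-mempool may differ.
pub struct MempoolLimits {
    pub max_transactions: usize,         // from chain_config.mempool_tx_limit
    pub max_declare_transactions: usize, // from chain_config.mempool_declare_tx_limit
    pub max_age: Duration,               // from chain_config.mempool_tx_max_age
}

impl MempoolLimits {
    pub fn new(chain_config: &ChainConfig) -> Self {
        Self {
            max_transactions: chain_config.mempool_tx_limit,
            max_declare_transactions: chain_config.mempool_declare_tx_limit,
            max_age: chain_config.mempool_tx_max_age,
        }
    }
}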
diff --git a/crates/primitives/class/src/lib.rs b/crates/primitives/class/src/lib.rs index 31f84adf3..185d10d62 100644 --- a/crates/primitives/class/src/lib.rs +++ b/crates/primitives/class/src/lib.rs @@ -9,7 +9,7 @@ pub mod convert; mod into_starknet_core; mod into_starknet_types; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub enum ConvertedClass { Legacy(LegacyConvertedClass), Sierra(SierraConvertedClass), diff --git a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs index 4c0ba4dd7..4cbf41556 100644 --- a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs +++ b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs @@ -1,17 +1,114 @@ -use std::sync::Arc; - use crate::{ from_broadcasted_transaction::is_query, into_starknet_api::TransactionApiError, BroadcastedDeclareTransactionV0, - Transaction, TransactionWithHash, + L1HandlerTransaction, Transaction, TransactionWithHash, +}; +use blockifier::{ + execution::contract_class::ClassInfo as BClassInfo, execution::errors::ContractClassError, + transaction::errors::TransactionExecutionError, transaction::transaction_execution::Transaction as BTransaction, }; -use blockifier::{execution::errors::ContractClassError, transaction::errors::TransactionExecutionError}; use mp_chain_config::StarknetVersion; use mp_class::{ class_hash, compile::ClassCompilationError, CompressedLegacyContractClass, ConvertedClass, FlattenedSierraClass, LegacyClassInfo, LegacyConvertedClass, SierraClassInfo, SierraConvertedClass, }; -use starknet_api::transaction::TransactionHash; +use starknet_api::transaction::{Fee, TransactionHash}; use starknet_types_core::felt::Felt; +use starknet_types_rpc::{BroadcastedDeclareTxn, BroadcastedTxn}; +use std::sync::Arc; + +pub trait BroadcastedTransactionExt { + fn into_blockifier( + self, + chain_id: Felt, + starknet_version: StarknetVersion, + ) -> Result<(BTransaction, Option<ConvertedClass>), BroadcastedToBlockifierError>; +} + +impl BroadcastedTransactionExt for BroadcastedTxn<Felt> { + fn into_blockifier( + self, + chain_id: Felt, + starknet_version: StarknetVersion, + ) -> Result<(BTransaction, Option<ConvertedClass>), BroadcastedToBlockifierError> { + let (class_info, converted_class, class_hash) = match &self { + BroadcastedTxn::Declare(tx) => match tx { + BroadcastedDeclareTxn::V1(tx) | BroadcastedDeclareTxn::QueryV1(tx) => { + handle_class_legacy(Arc::new((tx.contract_class).clone().try_into()?))? + } + BroadcastedDeclareTxn::V2(tx) | BroadcastedDeclareTxn::QueryV2(tx) => { + handle_class_sierra(Arc::new((tx.contract_class).clone().into()), tx.compiled_class_hash)? + } + BroadcastedDeclareTxn::V3(tx) | BroadcastedDeclareTxn::QueryV3(tx) => { + handle_class_sierra(Arc::new((tx.contract_class).clone().into()), tx.compiled_class_hash)? + } + }, + _ => (None, None, None), + }; + + let is_query = is_query(&self); + let TransactionWithHash { transaction, hash } = + TransactionWithHash::from_broadcasted(self, chain_id, starknet_version, class_hash); + let deployed_address = match &transaction { + Transaction::DeployAccount(tx) => Some(tx.calculate_contract_address()), + _ => None, + }; + let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; + + Ok(( + BTransaction::from_api( + transaction, + TransactionHash(hash), + class_info, + None, + deployed_address.map(|address| address.try_into().expect("Address conversion should never fail")), + is_query, + )?, + converted_class, + )) + } +} + +impl L1HandlerTransaction { + pub fn into_blockifier( + self, + chain_id: Felt, + _starknet_version: StarknetVersion, + paid_fees_on_l1: u128, + ) -> Result<(BTransaction, Option<ConvertedClass>), BroadcastedToBlockifierError> { + let transaction = Transaction::L1Handler(self.clone()); + // TODO: check self.version + let hash = self.compute_hash(chain_id, false, false); + let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; + + Ok(( + BTransaction::from_api(transaction, TransactionHash(hash), None, Some(Fee(paid_fees_on_l1)), None, false)?, + None, + )) + } +} + +impl BroadcastedDeclareTransactionV0 { + pub fn into_blockifier( + self, + chain_id: Felt, + starknet_version: StarknetVersion, + ) -> Result<(BTransaction, Option<ConvertedClass>), BroadcastedToBlockifierError> { + let (class_info, converted_class, class_hash) = handle_class_legacy(Arc::clone(&self.contract_class))?; + + let is_query = self.is_query; + let transaction = Transaction::Declare(crate::DeclareTransaction::from_broadcasted_declare_v0( + self, + class_hash.expect("Class hash must be provided for DeclareTransaction"), + )); + let hash = transaction.compute_hash(chain_id, starknet_version, is_query); + let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; + + Ok(( + BTransaction::from_api(transaction, TransactionHash(hash), class_info, None, None, is_query)?, + converted_class, + )) + } +} #[derive(thiserror::Error, Debug)] pub enum BroadcastedToBlockifierError { @@ -29,162 +126,51 @@ pub enum BroadcastedToBlockifierError { ConvertTxBlockifierError(#[from] TransactionExecutionError), #[error("Failed to convert contract class: {0}")] ConvertContractClassError(#[from] ContractClassError), - #[error("Declare legacy contract classes are not supported")] - LegacyContractClassesNotSupported, #[error("Compiled class hash mismatch: expected {expected}, actual {compilation}")] CompiledClassHashMismatch { expected: Felt, compilation: Felt }, #[error("Failed to convert base64 program to cairo program: {0}")] Base64ToCairoError(#[from] base64::DecodeError), } -pub fn broadcasted_declare_v0_to_blockifier( - transaction: BroadcastedDeclareTransactionV0, - chain_id: Felt, - starknet_version: StarknetVersion, -) -> Result< - (blockifier::transaction::transaction_execution::Transaction, Option<ConvertedClass>), - BroadcastedToBlockifierError, -> { - let (class_info, class_hash, extra_class_info) = { - let compressed_legacy_class: CompressedLegacyContractClass = (*transaction.contract_class).clone(); - let class_hash = compressed_legacy_class.compute_class_hash().unwrap(); - let compressed_legacy_class: CompressedLegacyContractClass = (*transaction.contract_class).clone(); - let class_blockifier = - compressed_legacy_class.to_blockifier_class().map_err(BroadcastedToBlockifierError::CompilationFailed)?; - - let class_info = LegacyClassInfo {
contract_class: Arc::new(compressed_legacy_class) }; - - ( - Some(blockifier::execution::contract_class::ClassInfo::new(&class_blockifier, 0, 0)?), - Some(class_hash), - Some(ConvertedClass::Legacy(LegacyConvertedClass { class_hash, info: class_info })), - ) - }; - - let is_query = transaction.is_query; - let TransactionWithHash { transaction, hash } = - TransactionWithHash::from_broadcasted_declare_v0(transaction, chain_id, starknet_version, class_hash); - - let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; - +#[allow(clippy::type_complexity)] +fn handle_class_legacy( + contract_class: Arc<CompressedLegacyContractClass>, +) -> Result<(Option<BClassInfo>, Option<ConvertedClass>, Option<Felt>), BroadcastedToBlockifierError> { + let class_hash = contract_class.compute_class_hash()?; + tracing::debug!("Computed legacy class hash: {:?}", class_hash); + let class_blockifier = + contract_class.to_blockifier_class().map_err(BroadcastedToBlockifierError::CompilationFailed)?; Ok(( - blockifier::transaction::transaction_execution::Transaction::from_api( - transaction, - TransactionHash(hash), - class_info, - None, - None, - is_query, - )?, - extra_class_info, + Some(BClassInfo::new(&class_blockifier, 0, 0)?), + Some(ConvertedClass::Legacy(LegacyConvertedClass { class_hash, info: LegacyClassInfo { contract_class } })), + Some(class_hash), )) } -pub fn broadcasted_to_blockifier( - transaction: starknet_types_rpc::BroadcastedTxn<Felt>, - chain_id: Felt, - starknet_version: StarknetVersion, -) -> Result< - (blockifier::transaction::transaction_execution::Transaction, Option<ConvertedClass>), - BroadcastedToBlockifierError, -> { - let (class_info, class_hash, extra_class_info) = match &transaction { - starknet_types_rpc::BroadcastedTxn::Declare(tx) => match tx { - starknet_types_rpc::BroadcastedDeclareTxn::V1(starknet_types_rpc::BroadcastedDeclareTxnV1 { - contract_class, - .. - }) - | starknet_types_rpc::BroadcastedDeclareTxn::QueryV1(starknet_types_rpc::BroadcastedDeclareTxnV1 { - contract_class, - .. - }) => { - let compressed_legacy_class: CompressedLegacyContractClass = contract_class.clone().try_into()?; - let class_hash = compressed_legacy_class.compute_class_hash().unwrap(); - tracing::debug!("Computed legacy class hash: {:?}", class_hash); - let compressed_legacy_class: CompressedLegacyContractClass = contract_class.clone().try_into()?; - let class_blockifier = compressed_legacy_class - .to_blockifier_class() - .map_err(BroadcastedToBlockifierError::CompilationFailed)?; - let class_info = LegacyClassInfo { contract_class: Arc::new(compressed_legacy_class) }; - - ( - Some(blockifier::execution::contract_class::ClassInfo::new(&class_blockifier, 0, 0)?), - Some(class_hash), - Some(ConvertedClass::Legacy(LegacyConvertedClass { class_hash, info: class_info })), - ) - } - starknet_types_rpc::BroadcastedDeclareTxn::V2(starknet_types_rpc::BroadcastedDeclareTxnV2 { - compiled_class_hash, - contract_class, - .. - }) - | starknet_types_rpc::BroadcastedDeclareTxn::QueryV2(starknet_types_rpc::BroadcastedDeclareTxnV2 { - compiled_class_hash, - contract_class, - .. - }) - | starknet_types_rpc::BroadcastedDeclareTxn::V3(starknet_types_rpc::BroadcastedDeclareTxnV3 { - compiled_class_hash, - contract_class, - .. - }) - | starknet_types_rpc::BroadcastedDeclareTxn::QueryV3(starknet_types_rpc::BroadcastedDeclareTxnV3 { - compiled_class_hash, - contract_class, - ..
- }) => { - let flatten_sierra_class: FlattenedSierraClass = contract_class.clone().into(); - let class_hash = flatten_sierra_class - .compute_class_hash() - .map_err(BroadcastedToBlockifierError::ComputeSierraClassHashFailed)?; - let (compiled_class_hash_computed, compiled) = flatten_sierra_class.compile_to_casm()?; - if compiled_class_hash != &compiled_class_hash_computed { - return Err(BroadcastedToBlockifierError::CompiledClassHashMismatch { - expected: *compiled_class_hash, - compilation: compiled_class_hash_computed, - }); - } - let class_info = SierraClassInfo { - contract_class: Arc::new(flatten_sierra_class), - compiled_class_hash: compiled_class_hash_computed, - }; - - ( - Some(blockifier::execution::contract_class::ClassInfo::new( - &compiled.to_blockifier_class()?, - contract_class.sierra_program.len(), - contract_class.abi.as_ref().map(|abi| abi.len()).unwrap_or(0), - )?), - Some(class_hash), - Some(ConvertedClass::Sierra(SierraConvertedClass { - class_hash, - info: class_info, - compiled: Arc::new(compiled), - })), - ) - } - }, - _ => (None, None, None), - }; - - let is_query = is_query(&transaction); - let TransactionWithHash { transaction, hash } = - TransactionWithHash::from_broadcasted(transaction, chain_id, starknet_version, class_hash); - let deployed_address = match &transaction { - Transaction::DeployAccount(tx) => Some(tx.calculate_contract_address()), - _ => None, - }; - let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; - +#[allow(clippy::type_complexity)] +fn handle_class_sierra( + contract_class: Arc<FlattenedSierraClass>, + expected_compiled_class_hash: Felt, +) -> Result<(Option<BClassInfo>, Option<ConvertedClass>, Option<Felt>), BroadcastedToBlockifierError> { + let class_hash = contract_class.compute_class_hash()?; + let (compiled_class_hash, compiled) = contract_class.compile_to_casm()?; + if expected_compiled_class_hash != compiled_class_hash { + return Err(BroadcastedToBlockifierError::CompiledClassHashMismatch { + expected: expected_compiled_class_hash, + compilation: compiled_class_hash, + }); + } Ok(( - blockifier::transaction::transaction_execution::Transaction::from_api( - transaction, - TransactionHash(hash), - class_info, - None, - deployed_address.map(|address| address.try_into().expect("Address conversion should never fail")), - is_query, - )?, - extra_class_info, + Some(BClassInfo::new( + &compiled.to_blockifier_class()?, + contract_class.sierra_program.len(), + contract_class.abi.len(), + )?), + Some(ConvertedClass::Sierra(SierraConvertedClass { + class_hash, + info: SierraClassInfo { contract_class, compiled_class_hash }, + compiled: Arc::new(compiled), + })), + Some(class_hash), )) }
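Note: with the free functions removed, conversion is now a method on the broadcasted types themselves, re-exported from mp-transactions as BroadcastedTransactionExt. A sketch of a call site (the tx, chain id, and version parameters are placeholders for whatever the caller already has):

use mp_chain_config::StarknetVersion;
use mp_transactions::{BroadcastedToBlockifierError, BroadcastedTransactionExt};
use starknet_types_core::felt::Felt;
use starknet_types_rpc::BroadcastedTxn;

// Placeholder inputs; in the node these come from the RPC layer and the chain config.
fn convert(
    tx: BroadcastedTxn<Felt>,
    chain_id: Felt,
    version: StarknetVersion,
) -> Result<(), BroadcastedToBlockifierError> {
    // Returns the executable blockifier transaction plus, for declares, the
    // converted class to be stored alongside it.
    let (_blockifier_tx, _converted_class) = tx.into_blockifier(chain_id, version)?;
    Ok(())
}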
diff --git a/crates/primitives/transactions/src/from_broadcasted_transaction.rs b/crates/primitives/transactions/src/from_broadcasted_transaction.rs index c76e92406..47b1f62d6 100644 --- a/crates/primitives/transactions/src/from_broadcasted_transaction.rs +++ b/crates/primitives/transactions/src/from_broadcasted_transaction.rs @@ -1,11 +1,10 @@ -use mp_chain_config::StarknetVersion; -use starknet_types_core::felt::Felt; - use crate::{ BroadcastedDeclareTransactionV0, DeclareTransaction, DeclareTransactionV0, DeclareTransactionV1, DeclareTransactionV2, DeclareTransactionV3, DeployAccountTransaction, InvokeTransaction, Transaction, TransactionWithHash, }; +use mp_chain_config::StarknetVersion; +use starknet_types_core::felt::Felt; // class_hash is required for DeclareTransaction impl TransactionWithHash { @@ -29,21 +28,6 @@ let hash = transaction.compute_hash(chain_id, starknet_version, is_query); Self { hash, transaction } } - - pub fn from_broadcasted_declare_v0( - tx: BroadcastedDeclareTransactionV0, - chain_id: Felt, - starknet_version: StarknetVersion, - class_hash: Option<Felt>, - ) -> Self { - let is_query = tx.is_query; - let transaction: Transaction = Transaction::Declare(DeclareTransaction::from_broadcasted_declare_v0( - tx, - class_hash.expect("Class hash must be provided for DeclareTransactionV0"), - )); - let hash = transaction.compute_hash(chain_id, starknet_version, is_query); - Self { hash, transaction } - } } impl From<BroadcastedInvokeTxn<Felt>> for InvokeTransaction { @@ -77,7 +61,7 @@ impl DeclareTransaction { } } - fn from_broadcasted_declare_v0(tx: BroadcastedDeclareTransactionV0, class_hash: Felt) -> Self { + pub fn from_broadcasted_declare_v0(tx: BroadcastedDeclareTransactionV0, class_hash: Felt) -> Self { DeclareTransaction::V0(DeclareTransactionV0::from_broadcasted_declare_v0(tx, class_hash)) } } diff --git a/crates/primitives/transactions/src/lib.rs b/crates/primitives/transactions/src/lib.rs index bca511b5f..d045d3ac9 100644 --- a/crates/primitives/transactions/src/lib.rs +++ b/crates/primitives/transactions/src/lib.rs @@ -1,4 +1,8 @@ +use mp_class::CompressedLegacyContractClass; +use mp_convert::hex_serde::{U128AsHex, U64AsHex}; use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use starknet_api::transaction::TransactionVersion; use starknet_types_core::{felt::Felt, hash::StarkHash}; use std::sync::Arc; @@ -9,17 +13,11 @@ mod from_starknet_types; mod into_starknet_api; mod to_starknet_types; +// pub mod broadcasted; pub mod compute_hash; pub mod utils; -use mp_convert::hex_serde::{U128AsHex, U64AsHex}; -// pub use from_starknet_provider::TransactionTypeError; -pub use broadcasted_to_blockifier::{ - broadcasted_declare_v0_to_blockifier, broadcasted_to_blockifier, BroadcastedToBlockifierError, -}; -use mp_class::CompressedLegacyContractClass; -use serde_with::serde_as; -use starknet_api::transaction::TransactionVersion; +pub use broadcasted_to_blockifier::{BroadcastedToBlockifierError, BroadcastedTransactionExt}; const SIMULATE_TX_VERSION_OFFSET: Felt = Felt::from_hex_unchecked("0x100000000000000000000000000000000"); @@ -45,17 +43,12 @@ impl TransactionWithHash { } } -#[derive(Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct BroadcastedDeclareTransactionV0 { - /// The address of the account contract sending the declaration transaction pub sender_address: Felt, - /// The maximal fee that can be charged for including the transaction pub max_fee: Felt, - /// Signature pub signature: Vec<Felt>, - /// The class to be declared pub contract_class: Arc<CompressedLegacyContractClass>, - /// If set to `true`, uses a query-only transaction version that's invalid for execution pub is_query: bool, } @@ -719,6 +712,8 @@ impl From for DataAvailabilityMode { #[cfg(test)] mod tests { + use std::sync::Arc; + use super::*; use mp_class::{CompressedLegacyContractClass, LegacyEntryPointsByType};
diff --git a/crates/primitives/utils/src/parsers.rs b/crates/primitives/utils/src/parsers.rs index 244b403ff..c961a56f2 100644 --- a/crates/primitives/utils/src/parsers.rs +++ b/crates/primitives/utils/src/parsers.rs @@ -37,7 +37,8 @@ pub fn parse_duration(s: &str) -> anyhow::Result<Duration> { "ms" => Ok(Duration::from_millis(value)), "s" => Ok(Duration::from_secs(value)), "min" => Ok(Duration::from_secs(value * 60)), - _ => bail!("Invalid duration suffix: {}. Expected 'ms', 's', or 'min'.", suffix), + "h" => Ok(Duration::from_secs(value * 60 * 60)), + _ => bail!("Invalid duration suffix: {}. Expected 'ms', 's', 'min' or 'h'.", suffix), } } @@ -56,10 +57,10 @@ mod tests { assert_eq!(parse_duration("200ms").unwrap(), Duration::from_millis(200)); assert_eq!(parse_duration("5min").unwrap(), Duration::from_secs(300)); assert_eq!(parse_duration("1 min").unwrap(), Duration::from_secs(60)); + assert_eq!(parse_duration("5h").unwrap(), Duration::from_secs(5 * 60 * 60)); assert_eq!(parse_duration("10 s").unwrap(), Duration::from_secs(10)); assert!(parse_duration("2x").is_err()); assert!(parse_duration("200").is_err()); - assert!(parse_duration("5h").is_err()); assert!(parse_duration("ms200").is_err()); assert!(parse_duration("-5s").is_err()); assert!(parse_duration("5.5s").is_err());
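Note: the new 'h' suffix is what lets the presets and chain_config.example.yaml say mempool_tx_max_age: "5h". A short usage sketch, grounded in the tests above (this assumes parse_duration is reachable via mp_utils::parsers, matching the crate path of the file being patched):

use mp_utils::parsers::parse_duration;
use std::time::Duration;

fn duration_examples() -> anyhow::Result<()> {
    assert_eq!(parse_duration("5h")?, Duration::from_secs(5 * 60 * 60));
    assert_eq!(parse_duration("24h")?, Duration::from_secs(86_400));
    assert!(parse_duration("5d").is_err()); // days remain unsupported
    Ok(())
}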
diff --git a/crates/tests/src/lib.rs b/crates/tests/src/lib.rs index 32a6b1838..4deda5c74 100644 --- a/crates/tests/src/lib.rs +++ b/crates/tests/src/lib.rs @@ -6,10 +6,14 @@ mod storage_proof; use anyhow::bail; use rstest::rstest; +use starknet::accounts::{Account, ExecutionEncoding, SingleOwnerAccount}; +use starknet::signers::{LocalWallet, SigningKey}; +use starknet_core::types::{BlockId, BlockTag, Call, Felt}; +use starknet_core::utils::starknet_keccak; use starknet_providers::Provider; use starknet_providers::{jsonrpc::HttpTransport, JsonRpcClient, Url}; -use std::ops::Range; -use std::sync::Mutex; +use std::ops::{Deref, Range}; +use std::sync::{Arc, Mutex}; use std::{ collections::HashMap, env, @@ -44,9 +48,9 @@ async fn wait_for_cond<F: Future<Output = anyhow::Result<()>>>( pub struct MadaraCmd { process: Option<Child>, ready: bool, - json_rpc: Option<JsonRpcClient<HttpTransport>>, + json_rpc: JsonRpcClient<HttpTransport>, rpc_url: Url, - tempdir: TempDir, + tempdir: Arc<TempDir>, _port: MadaraPortNum, } @@ -55,8 +59,8 @@ impl MadaraCmd { self.process.take().unwrap().wait_with_output().unwrap() } - pub fn json_rpc(&mut self) -> &JsonRpcClient<HttpTransport> { - self.json_rpc.get_or_insert_with(|| JsonRpcClient::new(HttpTransport::new(self.rpc_url.clone()))) + pub fn json_rpc(&self) -> &JsonRpcClient<HttpTransport> { + &self.json_rpc } pub fn db_dir(&self) -> &Path { @@ -71,8 +75,8 @@ impl MadaraCmd { res.error_for_status()?; anyhow::Ok(()) }, - Duration::from_secs(2), - 10, + Duration::from_millis(500), + 20, ) .await; self.ready = true; @@ -146,6 +150,7 @@ lazy_static::lazy_static! { static ref AVAILABLE_PORTS: Mutex<AvailablePorts<Range<u16>>> = Mutex::new(AvailablePorts { to_reuse: vec![], next: PORT_RANGE }); } +#[derive(Clone)] pub struct MadaraPortNum(pub u16); impl Drop for MadaraPortNum { fn drop(&mut self) { @@ -163,10 +168,11 @@ pub fn get_port() -> MadaraPortNum { MadaraPortNum(port) } +#[derive(Clone)] pub struct MadaraCmdBuilder { args: Vec<String>, env: HashMap<String, String>, - tempdir: TempDir, + tempdir: Arc<TempDir>, port: MadaraPortNum, } @@ -181,7 +187,7 @@ impl MadaraCmdBuilder { Self { args: Default::default(), env: Default::default(), - tempdir: TempDir::with_prefix("madara-test").unwrap(), + tempdir: Arc::new(TempDir::with_prefix("madara-test").unwrap()), port: get_port(), } } @@ -213,7 +219,7 @@ impl MadaraCmdBuilder { .into_iter() .chain([ "--base-path".into(), - format!("{}", self.tempdir.as_ref().display()), + format!("{}", self.tempdir.deref().as_ref().display()), "--rpc-port".into(), format!("{}", self.port.0), ]) @@ -223,11 +229,12 @@ .spawn() .unwrap(); + let rpc_url = Url::parse(&format!("http://127.0.0.1:{}/", self.port.0)).unwrap(); MadaraCmd { process: Some(process), ready: false, - json_rpc: None, - rpc_url: Url::parse(&format!("http://127.0.0.1:{}/", self.port.0)).unwrap(), + json_rpc: JsonRpcClient::new(HttpTransport::new(rpc_url.clone())), + rpc_url, tempdir: self.tempdir, _port: self.port, } @@ -277,3 +284,198 @@ async fn madara_can_sync_a_few_blocks() { } ); } + +const ERC20_STRK_CONTRACT_ADDRESS: Felt = + Felt::from_hex_unchecked("0x04718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d"); +#[allow(unused)] +const ERC20_ETH_CONTRACT_ADDRESS: Felt = + Felt::from_hex_unchecked("0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"); + +const ACCOUNT_SECRET: Felt = + Felt::from_hex_unchecked("0x077e56c6dc32d40a67f6f7e6625c8dc5e570abe49c0a24e9202e4ae906abcc07"); +const ACCOUNT_ADDRESS: Felt = + Felt::from_hex_unchecked("0x055be462e718c4166d656d11f89e341115b8bc82389c3762a10eade04fcb225d"); + +#[rstest] +#[tokio::test] +async fn madara_devnet_add_transaction() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let args = &[ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // only produce blocks, no pending txs + "--chain-config-override", + "block_time=1s,pending_block_update_time=1s", + ]; + + let cmd_builder = MadaraCmdBuilder::new().args(*args); + let mut node = cmd_builder.run(); + node.wait_for_ready().await; + + tokio::time::sleep(Duration::from_secs(3)).await; + + let chain_id = node.json_rpc().chain_id().await.unwrap(); + + let signer = LocalWallet::from_signing_key(SigningKey::from_secret_scalar(ACCOUNT_SECRET)); + let mut account = + SingleOwnerAccount::new(node.json_rpc(), signer, ACCOUNT_ADDRESS, chain_id, ExecutionEncoding::New); + account.set_block_id(BlockId::Tag(BlockTag::Latest)); + + let res = account + .execute_v3(vec![Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: starknet_keccak(b"transfer"), + calldata: vec![ACCOUNT_ADDRESS, 15.into(), Felt::ZERO], + }]) + .send() + .await + .unwrap(); + + wait_for_cond( + || async { + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await?; + Ok(()) + }, + Duration::from_millis(500), + 60, + ) + .await; +} + +#[rstest] +#[tokio::test] +async fn madara_devnet_mempool_saving() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let cmd_builder = MadaraCmdBuilder::new().args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks, never update the pending block + "--chain-config-path",
"test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=5min", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + let chain_id = node.json_rpc().chain_id().await.unwrap(); + + let signer = LocalWallet::from_signing_key(SigningKey::from_secret_scalar(ACCOUNT_SECRET)); + let mut account = + SingleOwnerAccount::new(node.json_rpc(), signer, ACCOUNT_ADDRESS, chain_id, ExecutionEncoding::New); + account.set_block_id(BlockId::Tag(BlockTag::Pending)); + + let res = account + .execute_v3(vec![Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: starknet_keccak(b"transfer"), + calldata: vec![ACCOUNT_ADDRESS, 15.into(), Felt::ZERO], + }]) + .send() + .await + .unwrap(); + + drop(node); + + // tx should be in saved mempool + + let cmd_builder = cmd_builder.args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks but produce pending txs + "--chain-config-path", + "test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=500ms", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + // tx should be in mempool + + wait_for_cond( + || async { + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await?; + Ok(()) + }, + Duration::from_millis(500), + 60, + ) + .await; +} + +#[rstest] +#[tokio::test] +async fn madara_devnet_continue_pending() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let cmd_builder = MadaraCmdBuilder::new().args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks but produce pending txs + "--chain-config-path", + "test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=500ms", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + let chain_id = node.json_rpc().chain_id().await.unwrap(); + + let signer = LocalWallet::from_signing_key(SigningKey::from_secret_scalar(ACCOUNT_SECRET)); + let mut account = + SingleOwnerAccount::new(node.json_rpc(), signer, ACCOUNT_ADDRESS, chain_id, ExecutionEncoding::New); + account.set_block_id(BlockId::Tag(BlockTag::Pending)); + + let res = account + .execute_v3(vec![Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: starknet_keccak(b"transfer"), + calldata: vec![ACCOUNT_ADDRESS, 15.into(), Felt::ZERO], + }]) + .send() + .await + .unwrap(); + + wait_for_cond( + || async { + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await?; + Ok(()) + }, + Duration::from_millis(500), + 60, + ) + .await; + + drop(node); + + // tx should appear in saved pending block + + let cmd_builder = cmd_builder.args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks never produce pending txs + "--chain-config-path", + "test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=5min", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + // should find receipt + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await.unwrap(); +} diff --git a/crates/tests/test_devnet.yaml b/crates/tests/test_devnet.yaml new file mode 100644 index 000000000..eb50e6487 --- /dev/null +++ b/crates/tests/test_devnet.yaml @@ -0,0 +1,34 @@ +chain_name: "Test devnet" +chain_id: "MADARA_DEVNET" +feeder_gateway_url: "http://localhost:8080/feeder_gateway/" +gateway_url: "http://localhost:8080/gateway/" +native_fee_token_address: 
"0x04718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" +parent_fee_token_address: "0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7" +latest_protocol_version: "0.13.2" +block_time: "10s" +pending_block_update_time: "500ms" +execution_batch_size: 16 +bouncer_config: + block_max_capacity: + builtin_count: + add_mod: 18446744073709551615 + bitwise: 18446744073709551615 + ecdsa: 18446744073709551615 + ec_op: 18446744073709551615 + keccak: 18446744073709551615 + mul_mod: 18446744073709551615 + pedersen: 18446744073709551615 + poseidon: 18446744073709551615 + range_check: 18446744073709551615 + range_check96: 18446744073709551615 + gas: 18446744073709551615 + n_steps: 18446744073709551615 + message_segment_length: 18446744073709551615 + n_events: 18446744073709551615 + state_diff_size: 131072 +sequencer_address: "0x123" +eth_core_contract_address: "0xe7f1725e7734ce288f8367e1bb143e90bb3f0512" +eth_gps_statement_verifier: "0xf294781D719D2F4169cE54469C28908E6FA752C1" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/scripts/e2e-coverage.sh b/scripts/e2e-coverage.sh index 3022de323..acfed971e 100755 --- a/scripts/e2e-coverage.sh +++ b/scripts/e2e-coverage.sh @@ -19,7 +19,10 @@ subshell() { sleep 1 done - cargo test --profile dev --workspace $@ + + ARGS=$@ + export PROPTEST_CASES=5 + cargo test --profile dev ${ARGS:=--workspace} cargo llvm-cov report --lcov --output-path lcov.info cargo llvm-cov report diff --git a/scripts/e2e-tests.sh b/scripts/e2e-tests.sh index ae77bb00c..362d5989d 100755 --- a/scripts/e2e-tests.sh +++ b/scripts/e2e-tests.sh @@ -22,7 +22,9 @@ subshell() { sleep 1 done - cargo test --profile dev --workspace $@ + ARGS=$@ + export PROPTEST_CASES=5 + cargo test --profile dev ${ARGS:=--workspace} } (subshell $@ && r=$?) || r=$?