diff --git a/Cargo.lock b/Cargo.lock index 4581fb9ce05..74d1a4be246 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2370,6 +2370,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "ef_tests" version = "0.2.0" @@ -2487,6 +2499,26 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "env_logger" version = "0.8.4" @@ -2847,20 +2879,25 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.7.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e999563461faea0ab9bc0024e5e66adcee35881f3d5062f52f31a4070fe1522" +checksum = "86da3096d1304f5f28476ce383005385459afeaf0eea08592b65ddbc9b258d16" dependencies = [ "alloy-primitives", + "arbitrary", + "ethereum_serde_utils", "itertools 0.13.0", + "serde", + "serde_derive", "smallvec", + "typenum", ] [[package]] name = "ethereum_ssz_derive" -version = "0.7.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3deae99c8e74829a00ba7a92d49055732b3c1f093f2ccfa3cbc621679b6fa91" +checksum = "d832a5c38eba0e7ad92592f7a22d693954637fbb332b4f669590d66a5c3183e5" dependencies = [ "darling 0.20.10", "proc-macro2", @@ -5747,13 +5784,13 @@ dependencies = [ [[package]] name = "milhouse" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f68e33f98199224d1073f7c1468ea6abfea30736306fb79c7181a881e97ea32f" +checksum = "eb1ada1f56cc1c79f40517fdcbf57e19f60424a3a1ce372c3fe9b22e4fdd83eb" dependencies = [ "alloy-primitives", "arbitrary", - "derivative", + "educe", "ethereum_hashing", "ethereum_ssz", "ethereum_ssz_derive", @@ -8527,12 +8564,11 @@ dependencies = [ [[package]] name = "ssz_types" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e0719d2b86ac738a55ae71a8429f52aa2741da988f1fd0975b4cc610fd1e08" +checksum = "22bc24c8a61256950632fb6b68ea09f6b5c988070924c6292eb5933635202e00" dependencies = [ "arbitrary", - "derivative", "ethereum_serde_utils", "ethereum_ssz", "itertools 0.13.0", @@ -9442,20 +9478,22 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.8.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373495c23db675a5192de8b610395e1bec324d596f9e6111192ce903dc11403a" +checksum = "6c58eb0f518840670270d90d97ffee702d8662d9c5494870c9e1e9e0fa00f668" dependencies = [ "alloy-primitives", "ethereum_hashing", + "ethereum_ssz", "smallvec", + "typenum", ] [[package]] name = "tree_hash_derive" -version = "0.8.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0857056ca4eb5de8c417309be42bcff6017b47e86fbaddde609b4633f66061e" +checksum = 
"699e7fb6b3fdfe0c809916f251cf5132d64966858601695c3736630a87e7166a" dependencies = [ "darling 0.20.10", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 73912f60822..2f97769849f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -139,8 +139,8 @@ discv5 = { version = "0.9", features = ["libp2p"] } env_logger = "0.9" ethereum_hashing = "0.7.0" ethereum_serde_utils = "0.7" -ethereum_ssz = "0.7" -ethereum_ssz_derive = "0.7" +ethereum_ssz = "0.8.2" +ethereum_ssz_derive = "0.8.2" ethers-core = "1" ethers-providers = { version = "1", default-features = false } exit-future = "0.2" @@ -156,7 +156,7 @@ libsecp256k1 = "0.7" log = "0.4" lru = "0.12" maplit = "1" -milhouse = "0.3" +milhouse = "0.5" mockito = "1.5.0" num_cpus = "1" parking_lot = "0.12" @@ -194,7 +194,7 @@ slog-term = "2" sloggers = { version = "2", features = ["json"] } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = "0.8" +ssz_types = "0.10" strum = { version = "0.24", features = ["derive"] } superstruct = "0.8" syn = "1" @@ -213,8 +213,8 @@ tracing-appender = "0.2" tracing-core = "0.1" tracing-log = "0.2" tracing-subscriber = { version = "0.3", features = ["env-filter"] } -tree_hash = "0.8" -tree_hash_derive = "0.8" +tree_hash = "0.9" +tree_hash_derive = "0.9" url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } warp = { version = "0.3.7", default-features = false, features = ["tls"] } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ca21b519f15..c398d5b7d58 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1220,7 +1220,7 @@ impl BeaconChain { debug!( self.log, "Reconstructed txn"; - "bytes" => format!("0x{}", hex::encode(&**txn)), + "bytes" => format!("0x{}", hex::encode(txn)), ); } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 4526b2b3607..3395c40d89f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -3218,7 +3218,7 @@ pub fn generate_rand_block_and_blobs( execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { + for tx in &transactions { payload.execution_payload.transactions.push(tx).unwrap(); } message.body.blob_kzg_commitments = bundle.commitments.clone(); @@ -3237,7 +3237,7 @@ pub fn generate_rand_block_and_blobs( let (bundle, transactions) = execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { + for tx in &transactions { payload.execution_payload.transactions.push(tx).unwrap(); } message.body.blob_kzg_commitments = bundle.commitments.clone(); @@ -3256,7 +3256,7 @@ pub fn generate_rand_block_and_blobs( let (bundle, transactions) = execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { + for tx in &transactions { payload.execution_payload.transactions.push(tx).unwrap(); } message.body.blob_kzg_commitments = bundle.commitments.clone(); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index be7045c54a9..7ce73b6d9c9 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -296,8 +296,7 @@ async fn 
test_rewards_base_multi_inclusion() { .extend_slots(E::slots_per_epoch() as usize * 2 - 4) .await; - // pin to reduce stack size for clippy - Box::pin(check_all_base_rewards(&harness, initial_balances)).await; + check_all_base_rewards(&harness, initial_balances).await; } #[tokio::test] @@ -583,7 +582,8 @@ async fn check_all_base_rewards( harness: &BeaconChainHarness>, balances: Vec, ) { - check_all_base_rewards_for_subset(harness, balances, vec![]).await; + // The box reduces the size on the stack for a clippy lint. + Box::pin(check_all_base_rewards_for_subset(harness, balances, vec![])).await; } async fn check_all_base_rewards_for_subset( diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index d3a32c7929b..46d60952f97 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -22,9 +22,7 @@ pub fn calculate_execution_block_hash( // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a // better alternative when one appears, possibly following Reth. - let rlp_transactions_root = ordered_trie_root::( - payload.transactions().iter().map(|txn_bytes| &**txn_bytes), - ); + let rlp_transactions_root = ordered_trie_root::(payload.transactions().iter()); // Calculate withdrawals root (post-Capella). let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 747383754a5..70c51e25fe5 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1551,14 +1551,11 @@ mod test { /// Example: if `spec == &[1, 1]`, then two one-byte transactions will be created. 
fn generate_transactions(spec: &[usize]) -> Transactions { - let mut txs = VariableList::default(); + let mut txs = Transactions::default(); for &num_bytes in spec { - let mut tx = VariableList::default(); - for _ in 0..num_bytes { - tx.push(0).unwrap(); - } - txs.push(tx).unwrap(); + let tx = vec![0; num_bytes]; + txs.push(&tx).unwrap(); } txs diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 96615297d83..85d66e2cfc3 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -98,7 +98,6 @@ pub struct JsonExecutionPayload { pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, - #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, #[superstruct(only(V2, V3, V4, V5))] pub withdrawals: VariableList, @@ -884,7 +883,6 @@ impl From for JsonForkchoiceUpdatedV1Response { #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct JsonExecutionPayloadBodyV1 { - #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, pub withdrawals: Option>, } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 9fa375b3757..d7a0d5550f2 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -12,7 +12,6 @@ use parking_lot::Mutex; use rand::{rngs::StdRng, Rng, SeedableRng}; use serde::{Deserialize, Serialize}; use ssz::Decode; -use ssz_types::VariableList; use std::collections::HashMap; use std::sync::Arc; use tree_hash::TreeHash; @@ -20,8 +19,7 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, - ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, Transaction, Transactions, - Uint256, + ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -706,7 +704,7 @@ impl ExecutionBlockGenerator { as usize; let num_blobs = rng.gen::() % (max_blobs + 1); let (bundle, transactions) = generate_blobs(num_blobs)?; - for tx in Vec::from(transactions) { + for tx in &transactions { execution_payload .transactions_mut() .push(tx) @@ -751,13 +749,14 @@ pub fn generate_blobs( let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; let mut bundle = BlobsBundle::::default(); - let mut transactions = vec![]; + let mut transactions = Transactions::default(); for blob_index in 0..n_blobs { - let tx = static_valid_tx::() - .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; + let tx = static_valid_tx::(); - transactions.push(tx); + transactions + .push(&tx) + .map_err(|e| format!("invalid tx: {e:?}"))?; bundle .blobs .push(blob.clone()) @@ -772,10 +771,10 @@ pub fn generate_blobs( .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; } - Ok((bundle, transactions.into())) + Ok((bundle, transactions)) } -pub fn static_valid_tx() -> Result, String> { +pub fn static_valid_tx() -> Vec { // This is a real transaction hex encoded, but we don't care about the contents of the transaction. 
let transaction: EthersTransaction = serde_json::from_str( r#"{ @@ -796,8 +795,7 @@ pub fn static_valid_tx() -> Result PayloadId { diff --git a/beacon_node/execution_layer/src/versioned_hashes.rs b/beacon_node/execution_layer/src/versioned_hashes.rs index 97c3100de99..8ad2e60438e 100644 --- a/beacon_node/execution_layer/src/versioned_hashes.rs +++ b/beacon_node/execution_layer/src/versioned_hashes.rs @@ -1,6 +1,6 @@ use alloy_consensus::TxEnvelope; use alloy_rlp::Decodable; -use types::{EthSpec, ExecutionPayloadRef, Hash256, Unsigned, VersionedHash}; +use types::{EthSpec, ExecutionPayloadRef, Hash256, VersionedHash}; #[derive(Debug)] pub enum Error { @@ -59,10 +59,8 @@ pub fn extract_versioned_hashes_from_transactions( Ok(versioned_hashes) } -pub fn beacon_tx_to_tx_envelope( - tx: &types::Transaction, -) -> Result { - let tx_bytes = Vec::from(tx.clone()); +pub fn beacon_tx_to_tx_envelope(tx: &[u8]) -> Result { + let tx_bytes = Vec::from(tx); TxEnvelope::decode(&mut tx_bytes.as_slice()) .map_err(|e| Error::DecodingTransaction(e.to_string())) } @@ -78,7 +76,7 @@ mod test { #[test] fn test_decode_static_transaction() { - let valid_tx = static_valid_tx::().expect("should give me known valid transaction"); + let valid_tx = static_valid_tx::(); let tx_envelope = beacon_tx_to_tx_envelope(&valid_tx).expect("should decode tx"); let TxEnvelope::Legacy(signed_tx) = tx_envelope else { panic!("should decode to legacy transaction"); @@ -98,15 +96,9 @@ mod test { #[test] fn test_extract_versioned_hashes() { - use serde::Deserialize; + use types::Transactions; - #[derive(Deserialize)] - #[serde(transparent)] - struct TestTransactions( - #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] types::Transactions, - ); - - let TestTransactions(raw_transactions): TestTransactions = serde_json::from_str(r#"[ + let raw_transactions: Transactions = serde_json::from_str(r#"[ "0x03f901388501a1f0ff430f843b9aca00843b9aca0082520894e7249813d8ccf6fa95a2203f46a64166073d58878080c002f8c6a0012e98362c814f1724262c0d211a1463418a5f6382a8d457b37a2698afbe7b5ea00100ef985761395dfa8ed5ce91f3f2180b612401909e4cb8f33b90c8a454d9baa0013d45411623b90d90f916e4025ada74b453dd4ca093c017c838367c9de0f801a001753e2af0b1e70e7ef80541355b2a035cc9b2c177418bb2a4402a9b346cf84da0011789b520a8068094a92aa0b04db8d8ef1c6c9818947c5210821732b8744049a0011c4c4f95597305daa5f62bf5f690e37fa11f5de05a95d05cac4e2119e394db80a0ccd86a742af0e042d08cbb35d910ddc24bbc6538f9e53be6620d4b6e1bb77662a01a8bacbc614940ac2f5c23ffc00a122c9f085046883de65c88ab0edb859acb99", "0x02f9017a8501a1f0ff4382363485012a05f2008512a05f2000830249f094c1b0bc605e2c808aa0867bfc98e51a1fe3e9867f80b901040cc7326300000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000009445a285baa43e00000000000000000000000000c500931f24edb821cef6e28f7adb33b38578c82000000000000000000000000fc7360b3b28cf4204268a8354dbec60720d155d2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000009a054a063f0fe7b9c68de8df91aaa5e96c15ab540000000000000000000000000c8d41b8fcc066cdabaf074d78e5153e8ce018a9c080a008e14475c1173cd9f5740c24c08b793f9e16c36c08fa73769db95050e31e3396a019767dcdda26c4a774ca28c9df15d0c20e43bd07bd33ee0f84d6096cb5a1ebed" ]"#).expect("should get raw transactions"); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 2bf35b0e35e..07631970bc5 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -765,8 +765,8 @@ fn handle_rpc_response( SupportedProtocol::PingV1 => Ok(Some(RpcSuccessResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), - SupportedProtocol::MetaDataV1 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V1( - MetaDataV1::from_ssz_bytes(decoded_buffer)?, + SupportedProtocol::MetaDataV1 => Ok(Some(RpcSuccessResponse::MetaData(Arc::new( + MetaData::V1(MetaDataV1::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::LightClientBootstrapV1 => match fork_name { Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientBootstrap(Arc::new( @@ -826,11 +826,11 @@ fn handle_rpc_response( )), }, // MetaData V2/V3 responses have no context bytes, so behave similarly to V1 responses - SupportedProtocol::MetaDataV3 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V3( - MetaDataV3::from_ssz_bytes(decoded_buffer)?, + SupportedProtocol::MetaDataV3 => Ok(Some(RpcSuccessResponse::MetaData(Arc::new( + MetaData::V3(MetaDataV3::from_ssz_bytes(decoded_buffer)?), )))), - SupportedProtocol::MetaDataV2 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V2( - MetaDataV2::from_ssz_bytes(decoded_buffer)?, + SupportedProtocol::MetaDataV2 => Ok(Some(RpcSuccessResponse::MetaData(Arc::new( + MetaData::V2(MetaDataV2::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::BlocksByRangeV2 => match fork_name { Some(ForkName::Altair) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( @@ -933,6 +933,7 @@ mod tests { use super::*; use crate::rpc::protocol::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; + use types::Transactions; use types::{ blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, DataColumnIdentifier, EmptyBlock, @@ -1001,6 +1002,14 @@ mod tests { }) } + fn transactions(count: usize) -> Transactions { + let mut transactions = Transactions::default(); + for _ in 0..count { + transactions.push(&[0; 1024]).unwrap() + } + transactions + } + /// Bellatrix block with length < max_rpc_size. 
fn bellatrix_block_small( fork_context: &ForkContext, @@ -1008,10 +1017,8 @@ mod tests { ) -> SignedBeaconBlock { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); - block.body.execution_payload.execution_payload.transactions = txs; + block.body.execution_payload.execution_payload.transactions = transactions(5000); let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); @@ -1027,10 +1034,8 @@ mod tests { ) -> SignedBeaconBlock { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); - block.body.execution_payload.execution_payload.transactions = txs; + block.body.execution_payload.execution_payload.transactions = transactions(100000); let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); @@ -1105,28 +1110,31 @@ mod tests { Ping { data: 1 } } - fn metadata() -> MetaData { + fn metadata() -> Arc> { MetaData::V1(MetaDataV1 { seq_number: 1, attnets: EnrAttestationBitfield::::default(), }) + .into() } - fn metadata_v2() -> MetaData { + fn metadata_v2() -> Arc> { MetaData::V2(MetaDataV2 { seq_number: 1, attnets: EnrAttestationBitfield::::default(), syncnets: EnrSyncCommitteeBitfield::::default(), }) + .into() } - fn metadata_v3() -> MetaData { + fn metadata_v3() -> Arc> { MetaData::V3(MetaDataV3 { seq_number: 1, attnets: EnrAttestationBitfield::::default(), syncnets: EnrSyncCommitteeBitfield::::default(), custody_group_count: 1, }) + .into() } /// Encodes the given protocol response as bytes. diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index ad6bea455ec..0f7daa9feb4 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -557,7 +557,7 @@ pub enum RpcSuccessResponse { Pong(Ping), /// A response to a META_DATA request. - MetaData(MetaData), + MetaData(Arc>), } /// Indicates which response is being terminated by a stream termination response. 
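Note on the `MetaData` change above: wrapping the response in `Arc` makes the clones taken on the RPC path reference-count bumps instead of deep copies. A minimal standalone sketch of the pattern (the `MetaData` struct below is a stand-in, not the Lighthouse type):

```rust
use std::sync::Arc;

// Stand-in for a large, rarely-mutated response payload.
#[derive(Clone, Debug)]
struct MetaData {
    seq_number: u64,
    attnets: Vec<u8>,
}

fn main() {
    let metadata = Arc::new(MetaData {
        seq_number: 1,
        attnets: vec![0; 64],
    });

    // Cloning the `Arc` only increments a reference count; the payload is shared.
    let response = Arc::clone(&metadata);
    assert_eq!(response.seq_number, 1);

    // Consumers that need an owned copy can still clone the inner value,
    // mirroring the `meta_data.as_ref().clone()` call site in the next hunk.
    let owned: MetaData = response.as_ref().clone();
    assert_eq!(owned.seq_number, metadata.seq_number);
}
```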
diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 8586fd9cd36..9c3534aef90 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1186,7 +1186,7 @@ impl Network { ) { let metadata = self.network_globals.local_metadata.read().clone(); // The encoder is responsible for sending the negotiated version of the metadata - let event = RpcResponse::Success(RpcSuccessResponse::MetaData(metadata)); + let event = RpcResponse::Success(RpcSuccessResponse::MetaData(Arc::new(metadata))); self.eth2_rpc_mut() .send_response(peer_id, id, request_id, event); } @@ -1601,7 +1601,7 @@ impl Network { } RpcSuccessResponse::MetaData(meta_data) => { self.peer_manager_mut() - .meta_data_response(&peer_id, meta_data); + .meta_data_response(&peer_id, meta_data.as_ref().clone()); None } /* Network propagated protocols */ diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 4b54a24ddc8..5967ca42dc5 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -8,7 +8,6 @@ use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Response}; use slog::{debug, warn, Level}; use ssz::Encode; -use ssz_types::VariableList; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Runtime; @@ -16,18 +15,24 @@ use tokio::time::sleep; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, - RuntimeVariableList, Signature, SignedBeaconBlock, Slot, + RuntimeVariableList, Signature, SignedBeaconBlock, Slot, Transactions, }; type E = MinimalEthSpec; +fn transactions(n: usize) -> Transactions { + let mut transactions = Transactions::default(); + for _ in 0..n { + transactions.push(&[0; 1024]).unwrap() + } + transactions +} + /// Bellatrix block with length < max_rpc_size. fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); - block.body.execution_payload.execution_payload.transactions = txs; + block.body.execution_payload.execution_payload.transactions = transactions(5000); let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); @@ -39,10 +44,8 @@ fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> Beacon /// Hence, we generate a bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. 
fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); - block.body.execution_payload.execution_payload.transactions = txs; + block.body.execution_payload.execution_payload.transactions = transactions(100_000); let block = BeaconBlock::Bellatrix(block); assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index f45c55a7acf..7485e365ec2 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -19,9 +19,10 @@ pub enum EpochProcessingError { BeaconStateError(BeaconStateError), InclusionError(InclusionError), SszTypesError(ssz_types::Error), + BitfieldError(ssz::BitfieldError), ArithError(safe_arith::ArithError), InconsistentStateFork(InconsistentFork), - InvalidJustificationBit(ssz_types::Error), + InvalidJustificationBit(ssz::BitfieldError), InvalidFlagIndex(usize), MilhouseError(milhouse::Error), EpochCache(EpochCacheError), @@ -49,6 +50,12 @@ impl From for EpochProcessingError { } } +impl From for EpochProcessingError { + fn from(e: ssz::BitfieldError) -> EpochProcessingError { + EpochProcessingError::BitfieldError(e) + } +} + impl From for EpochProcessingError { fn from(e: safe_arith::ArithError) -> EpochProcessingError { EpochProcessingError::ArithError(e) diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 276b27b0f8a..da27c53d295 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -19,6 +19,7 @@ use super::{ #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), + BitfieldError(ssz::BitfieldError), AlreadySigned(usize), SubnetCountIsZero(ArithError), IncorrectStateVariant, @@ -229,7 +230,7 @@ impl Attestation { } } - pub fn get_aggregation_bit(&self, index: usize) -> Result { + pub fn get_aggregation_bit(&self, index: usize) -> Result { match self { Attestation::Base(att) => att.aggregation_bits.get(index), Attestation::Electra(att) => att.aggregation_bits.get(index), @@ -359,13 +360,13 @@ impl AttestationElectra { if self .aggregation_bits .get(committee_position) - .map_err(Error::SszTypesError)? + .map_err(Error::BitfieldError)? { Err(Error::AlreadySigned(committee_position)) } else { self.aggregation_bits .set(committee_position, true) - .map_err(Error::SszTypesError)?; + .map_err(Error::BitfieldError)?; self.signature.add_assign(signature); @@ -433,13 +434,13 @@ impl AttestationBase { if self .aggregation_bits .get(committee_position) - .map_err(Error::SszTypesError)? + .map_err(Error::BitfieldError)? 
{ Err(Error::AlreadySigned(committee_position)) } else { self.aggregation_bits .set(committee_position, true) - .map_err(Error::SszTypesError)?; + .map_err(Error::BitfieldError)?; self.signature.add_assign(signature); @@ -449,7 +450,7 @@ impl AttestationBase { pub fn extend_aggregation_bits( &self, - ) -> Result, ssz_types::Error> { + ) -> Result, ssz::BitfieldError> { self.aggregation_bits.resize::() } } @@ -608,7 +609,9 @@ impl SingleAttestation { let mut aggregation_bits = BitList::with_capacity(committee.len()).map_err(|_| Error::InvalidCommitteeLength)?; - aggregation_bits.set(aggregation_bit, true)?; + aggregation_bits + .set(aggregation_bit, true) + .map_err(Error::BitfieldError)?; Ok(Attestation::Electra(AttestationElectra { aggregation_bits, @@ -638,12 +641,12 @@ mod tests { let attestation_data = size_of::(); let signature = size_of::(); - assert_eq!(aggregation_bits, 56); + assert_eq!(aggregation_bits, 152); assert_eq!(attestation_data, 128); assert_eq!(signature, 288 + 16); let attestation_expected = aggregation_bits + attestation_data + signature; - assert_eq!(attestation_expected, 488); + assert_eq!(attestation_expected, 584); assert_eq!( size_of::>(), attestation_expected @@ -661,13 +664,13 @@ mod tests { size_of::::MaxCommitteesPerSlot>>(); let signature = size_of::(); - assert_eq!(aggregation_bits, 56); - assert_eq!(committee_bits, 56); + assert_eq!(aggregation_bits, 152); + assert_eq!(committee_bits, 152); assert_eq!(attestation_data, 128); assert_eq!(signature, 288 + 16); let attestation_expected = aggregation_bits + committee_bits + attestation_data + signature; - assert_eq!(attestation_expected, 544); + assert_eq!(attestation_expected, 736); assert_eq!( size_of::>(), attestation_expected diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/checkpoint.rs index 044fc57f22a..0caaedce5fa 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/checkpoint.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Epoch, Hash256}; use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; +use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -19,8 +19,6 @@ use tree_hash_derive::TreeHash; Hash, Serialize, Deserialize, - Encode, - Decode, TreeHash, TestRandom, )] @@ -29,6 +27,69 @@ pub struct Checkpoint { pub root: Hash256, } +/// Use a custom implementation of SSZ to avoid the overhead of the derive macro. +impl Encode for Checkpoint { + fn is_ssz_fixed_len() -> bool { + true + } + + #[allow(clippy::arithmetic_side_effects)] + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + ::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.epoch.ssz_append(buf); + self.root.ssz_append(buf); + } + + #[allow(clippy::arithmetic_side_effects)] + fn ssz_bytes_len(&self) -> usize { + self.epoch.ssz_bytes_len() + self.root.ssz_bytes_len() + } +} + +/// Use a custom implementation of SSZ to avoid the overhead of the derive macro. 
+impl Decode for Checkpoint { + fn is_ssz_fixed_len() -> bool { + true + } + + #[allow(clippy::arithmetic_side_effects)] + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let expected = ::ssz_fixed_len(); + + let (epoch, root) = bytes + .split_at_checked(::ssz_fixed_len()) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected, + })?; + + if root.len() != ::ssz_fixed_len() { + return Err(DecodeError::InvalidByteLength { + len: bytes.len(), + expected, + }); + } + + let epoch = { + let mut array = [0; 8]; + array.copy_from_slice(epoch); + u64::from_le_bytes(array) + }; + + Ok(Self { + epoch: Epoch::new(epoch), + root: Hash256::from_slice(root), + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 5d756c8529f..2789f479f69 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -6,12 +6,6 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; -pub type Transaction = VariableList; -pub type Transactions = VariableList< - Transaction<::MaxBytesPerTransaction>, - ::MaxTransactionsPerPayload, ->; - pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; #[superstruct( @@ -80,7 +74,6 @@ pub struct ExecutionPayload { pub base_fee_per_gas: Uint256, #[superstruct(getter(copy))] pub block_hash: ExecutionBlockHash, - #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub withdrawals: Withdrawals, diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 73a50b4ef3e..d2179a0a2ce 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -71,6 +71,7 @@ pub mod signed_voluntary_exit; pub mod signing_data; pub mod sync_committee_subscription; pub mod sync_duty; +pub mod transactions; pub mod validator; pub mod validator_subscription; pub mod voluntary_exit; @@ -165,8 +166,7 @@ pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadRef, Transaction, Transactions, - Withdrawals, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadRef, Withdrawals, }; pub use crate::execution_payload_header::{ ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, @@ -256,6 +256,7 @@ pub use crate::sync_committee_subscription::SyncCommitteeSubscription; pub use crate::sync_duty::SyncDuty; pub use crate::sync_selection_proof::SyncSelectionProof; pub use crate::sync_subnet_id::SyncSubnetId; +pub use crate::transactions::Transactions; pub use crate::validator::Validator; pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 43f72a39240..12b91501ae0 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -11,6 +11,7 @@ use tree_hash_derive::TreeHash; #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), + BitfieldError(ssz::BitfieldError), 
     ArithError(ArithError),
 }
@@ -68,7 +69,7 @@ impl<E: EthSpec> SyncAggregate<E> {
                 sync_aggregate
                     .sync_committee_bits
                     .set(participant_index, true)
-                    .map_err(Error::SszTypesError)?;
+                    .map_err(Error::BitfieldError)?;
             }
         }
         sync_aggregate
diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs
index c348c3e8be3..e6870b9442f 100644
--- a/consensus/types/src/sync_committee_contribution.rs
+++ b/consensus/types/src/sync_committee_contribution.rs
@@ -10,6 +10,7 @@ use tree_hash_derive::TreeHash;
 #[derive(Debug, PartialEq)]
 pub enum Error {
     SszTypesError(ssz_types::Error),
+    BitfieldError(ssz::BitfieldError),
     AlreadySigned(usize),
     SubnetCountIsZero(ArithError),
 }
@@ -53,7 +54,7 @@ impl<E: EthSpec> SyncCommitteeContribution<E> {
     ) -> Result<Self, Error> {
         let mut bits = BitVector::new();
         bits.set(validator_sync_committee_index, true)
-            .map_err(Error::SszTypesError)?;
+            .map_err(Error::BitfieldError)?;
         Ok(Self {
             slot: message.slot,
             beacon_block_root: message.beacon_block_root,
diff --git a/consensus/types/src/transactions.rs b/consensus/types/src/transactions.rs
new file mode 100644
index 00000000000..2cbec9de901
--- /dev/null
+++ b/consensus/types/src/transactions.rs
@@ -0,0 +1,665 @@
+use crate::test_utils::TestRandom;
+use crate::EthSpec;
+use arbitrary::Arbitrary;
+use derivative::Derivative;
+use rand::RngCore;
+use serde::{
+    ser::{SerializeSeq, Serializer},
+    Deserialize, Deserializer, Serialize,
+};
+use serde_utils::hex;
+use ssz::{encode_length, read_offset, Decode, DecodeError, Encode, BYTES_PER_LENGTH_OFFSET};
+use std::iter::IntoIterator;
+use std::marker::PhantomData;
+use tree_hash::{mix_in_length, MerkleHasher, TreeHash};
+
+/// Max number of transactions in a `TestRandom` instance.
+const TEST_RANDOM_MAX_TX_COUNT: usize = 128;
+/// Max length of a transaction in a `TestRandom` instance.
+const TEST_RANDOM_MAX_TX_BYTES: usize = 1_024;
+
+#[derive(Debug)]
+pub enum Error {
+    /// Exceeds `EthSpec::max_transactions_per_payload()`
+    TooManyTransactions,
+    /// Exceeds `EthSpec::max_bytes_per_transaction()`
+    TransactionTooBig,
+}
+
+/// The list of transactions in an execution payload.
+///
+/// This data structure represents the transactions similarly to how they're
+/// encoded as SSZ. This makes for fast and low-allocation-count `ssz::Decode`.
+#[derive(Debug, Clone, Derivative)]
+#[derivative(Default, PartialEq, Hash(bound = "E: EthSpec"))]
+pub struct Transactions<E> {
+    /// Points to the first byte of each transaction in `bytes`.
+    offsets: Vec<usize>,
+    /// All transactions, concatenated together.
+    bytes: Vec<u8>,
+    /// `EthSpec` to capture maximum allowed lengths.
+    _phantom: PhantomData<E>,
+}
+
+impl<E> Transactions<E> {
+    /// Creates an empty list.
+    pub fn empty() -> Self {
+        Self::default()
+    }
+
+    /// Iterates all transactions in `self`.
+    pub fn iter(&self) -> impl Iterator<Item = &[u8]> {
+        self.into_iter()
+    }
+
+    /// The number of transactions in `self`.
+    pub fn len(&self) -> usize {
+        self.offsets.len()
+    }
+
+    /// True if there are no transactions in `self`.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// The length of the offset/fixed-length section of the SSZ bytes, when
+    /// serialized.
+    fn len_offset_bytes(&self) -> usize {
+        self.offsets.len().saturating_mul(BYTES_PER_LENGTH_OFFSET)
+    }
+}
+
+impl<E: EthSpec> Transactions<E> {
+    /// Adds an `item` (i.e. transaction) to the list.
+    ///
+    /// ## Errors
+    ///
+    /// - If the `item` is longer than `EthSpec::max_bytes_per_transaction()`.
+    /// - If the operation would make this list longer than
+    ///   `EthSpec::max_transactions_per_payload()`.
+    pub fn push(&mut self, item: &[u8]) -> Result<(), Error> {
+        let max_tx_count = <E as EthSpec>::max_transactions_per_payload();
+        let max_tx_bytes = <E as EthSpec>::max_bytes_per_transaction();
+
+        if item.len() > max_tx_bytes {
+            Err(Error::TransactionTooBig)
+        } else if self.offsets.len() >= max_tx_count {
+            Err(Error::TooManyTransactions)
+        } else {
+            self.offsets.push(self.bytes.len());
+            self.bytes.extend_from_slice(item);
+            Ok(())
+        }
+    }
+}
+
+impl<E: EthSpec> From<Vec<Vec<u8>>> for Transactions<E> {
+    fn from(v: Vec<Vec<u8>>) -> Self {
+        let mut txs = Self::default();
+        for vec in v {
+            txs.push(&vec).unwrap();
+        }
+        txs
+    }
+}
+
+impl<E: EthSpec> Encode for Transactions<E> {
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        let len_offset_bytes = self.len_offset_bytes();
+        buf.reserve(self.ssz_bytes_len());
+        for offset in &self.offsets {
+            let offset = offset.saturating_add(len_offset_bytes);
+            buf.extend_from_slice(&encode_length(offset));
+        }
+        buf.extend_from_slice(&self.bytes);
+    }
+
+    fn ssz_bytes_len(&self) -> usize {
+        self.len_offset_bytes().saturating_add(self.bytes.len())
+    }
+}
+
+impl<E: EthSpec> Decode for Transactions<E> {
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        if bytes.is_empty() {
+            return Ok(Self::default());
+        }
+
+        // - `offset_bytes`: first section of bytes with pointers to items.
+        // - `value_bytes`: the list items pointed to by `offset_bytes`.
+        let (offset_bytes, value_bytes) = {
+            let first_offset = read_offset(bytes)?;
+
+            if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET
+            {
+                return Err(DecodeError::InvalidListFixedBytesLen(first_offset));
+            }
+
+            bytes
+                .split_at_checked(first_offset)
+                .ok_or(DecodeError::OffsetOutOfBounds(first_offset))?
+        };
+
+        // Disallow lists that have too many transactions.
+        let num_items = offset_bytes.len() / BYTES_PER_LENGTH_OFFSET;
+        let max_tx_count = <E as EthSpec>::max_transactions_per_payload();
+        if num_items > max_tx_count {
+            return Err(DecodeError::BytesInvalid(format!(
+                "List of {} txs exceeds maximum of {:?}",
+                num_items, max_tx_count
+            )));
+        }
+
+        let max_tx_bytes = <E as EthSpec>::max_bytes_per_transaction();
+        let mut offsets = Vec::with_capacity(num_items);
+        let mut offset_iter = offset_bytes.chunks(BYTES_PER_LENGTH_OFFSET).peekable();
+        while let Some(offset) = offset_iter.next() {
+            let offset = read_offset(offset)?;
+
+            // Make the offset assume that the values start at index 0, rather
+            // than following the offset bytes.
+            let offset = offset
+                .checked_sub(offset_bytes.len())
+                .ok_or(DecodeError::OffsetIntoFixedPortion(offset))?;
+
+            // Disallow an offset that points outside of the value bytes.
+            if offset > value_bytes.len() {
+                return Err(DecodeError::OffsetOutOfBounds(offset));
+            }
+
+            // Read the next offset (if any) to determine the length of this
+            // transaction.
+            let next_offset = if let Some(next_offset) = offset_iter.peek() {
+                read_offset(next_offset)?
+                    .checked_sub(offset_bytes.len())
+                    .ok_or(DecodeError::OffsetIntoFixedPortion(offset))?
+            } else {
+                value_bytes.len()
+            };
+
+            // Disallow any offset that is lower than the previous.
+            let tx_len = next_offset
+                .checked_sub(offset)
+                .ok_or(DecodeError::OffsetsAreDecreasing(offset))?;
+
+            // Disallow transactions that are too large.
+            if tx_len > max_tx_bytes {
+                return Err(DecodeError::BytesInvalid(format!(
+                    "length of {tx_len} exceeds maximum tx length of {max_tx_bytes}",
+                )));
+            }
+
+            offsets.push(offset);
+        }
+
+        Ok(Self {
+            offsets,
+            bytes: value_bytes.to_vec(),
+            _phantom: PhantomData,
+        })
+    }
+}
+
+impl<'a, E> IntoIterator for &'a Transactions<E> {
+    type Item = &'a [u8];
+    type IntoIter = TransactionsIter<'a>;
+
+    fn into_iter(self) -> TransactionsIter<'a> {
+        TransactionsIter {
+            offsets: &self.offsets,
+            bytes: &self.bytes,
+        }
+    }
+}
+
+pub struct TransactionsIter<'a> {
+    offsets: &'a [usize],
+    bytes: &'a [u8],
+}
+
+impl<'a> Iterator for TransactionsIter<'a> {
+    type Item = &'a [u8];
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let (offset, offsets) = self.offsets.split_first()?;
+        let next_offset = offsets.first().copied().unwrap_or(self.bytes.len());
+        self.offsets = offsets;
+        self.bytes.get(*offset..next_offset)
+    }
+}
+
+#[derive(Default)]
+pub struct Visitor<E> {
+    _phantom: PhantomData<E>,
+}
+
+impl<'a, E> serde::de::Visitor<'a> for Visitor<E>
+where
+    E: EthSpec,
+{
+    type Value = Transactions<E>;
+
+    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(formatter, "a list of 0x-prefixed hex bytes")
+    }
+
+    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+    where
+        A: serde::de::SeqAccess<'a>,
+    {
+        let mut txs: Transactions<E> = <_>::default();
+
+        while let Some(hex_str) = seq.next_element::<String>()? {
+            let bytes = hex::decode(&hex_str).map_err(serde::de::Error::custom)?;
+            txs.push(&bytes).map_err(|e| {
+                serde::de::Error::custom(format!("failed to deserialize transaction: {:?}.", e))
+            })?;
+        }
+
+        Ok(txs)
+    }
+}
+
+impl<E: EthSpec> Serialize for Transactions<E> {
+    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        let mut seq = serializer.serialize_seq(Some(self.len()))?;
+        for bytes in self {
+            seq.serialize_element(&hex::encode(bytes))?;
+        }
+        seq.end()
+    }
+}
+
+impl<'de, E: EthSpec> Deserialize<'de> for Transactions<E> {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        deserializer.deserialize_seq(Visitor::default())
+    }
+}
+
+impl<E: EthSpec> TreeHash for Transactions<E> {
+    fn tree_hash_type() -> tree_hash::TreeHashType {
+        tree_hash::TreeHashType::List
+    }
+
+    fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding {
+        panic!("transactions should never be packed")
+    }
+
+    fn tree_hash_packing_factor() -> usize {
+        panic!("transactions should never be packed")
+    }
+
+    #[allow(clippy::arithmetic_side_effects)]
+    fn tree_hash_root(&self) -> tree_hash::Hash256 {
+        let max_tx_count = <E as EthSpec>::max_transactions_per_payload();
+        let max_tx_len = <E as EthSpec>::max_bytes_per_transaction();
+        let bytes_per_leaf = 32;
+        let tx_leaf_count = max_tx_len.div_ceil(bytes_per_leaf);
+
+        let mut hasher = MerkleHasher::with_leaves(max_tx_count);
+
+        for tx in self.iter() {
+            // Produce a "leaf" hash of the transaction. This is the merkle root
+            // of the transaction.
+            let leaf = {
+                let mut leaf_hasher = MerkleHasher::with_leaves(tx_leaf_count);
+                leaf_hasher
+                    .write(tx)
+                    .expect("tx too large for hasher write, logic error");
+                let leaf = leaf_hasher
+                    .finish()
+                    .expect("tx too large for hasher finish, logic error");
+                mix_in_length(&leaf, tx.len())
+            };
+            // Add the leaf hash to the main tree.
+ hasher + .write(leaf.as_slice()) + .expect("cannot add leaf to transactions hash tree, logic error"); + } + + let root = hasher + .finish() + .expect("cannot finish transactions hash tree, logic error"); + mix_in_length(&root, self.len()) + } +} + +impl TestRandom for Transactions { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut txs = Self::default(); + let num_txs = rng.next_u32() as usize % TEST_RANDOM_MAX_TX_COUNT; + for _ in 0..num_txs { + let tx_len = rng.next_u32() as usize % TEST_RANDOM_MAX_TX_BYTES; + let mut tx = vec![0; tx_len]; + rng.fill_bytes(&mut tx[..]); + txs.push(&tx).unwrap(); + } + txs + } +} + +impl Arbitrary<'_> for Transactions { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + let mut txs = Self::default(); + let num_txs = usize::arbitrary(u).unwrap() % TEST_RANDOM_MAX_TX_COUNT; + for _ in 0..num_txs { + let tx_len = usize::arbitrary(u).unwrap() % TEST_RANDOM_MAX_TX_BYTES; + let mut tx = vec![0; tx_len]; + u.fill_buffer(&mut tx[..]).unwrap(); + txs.push(&tx).unwrap(); + } + Ok(txs) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + test_utils::{SeedableRng, XorShiftRng}, + MainnetEthSpec, VariableList, + }; + + type E = MainnetEthSpec; + pub type ReferenceTransaction = VariableList; + pub type ReferenceTransactions = VariableList< + ReferenceTransaction<::MaxBytesPerTransaction>, + ::MaxTransactionsPerPayload, + >; + + const NUM_RANDOM_VECTORS: usize = 256; + + struct TestVector { + name: String, + vector: Vec>, + } + + struct TestVectors { + vectors: Vec, + } + + impl Default for TestVectors { + fn default() -> Self { + let mut vectors = vec![ + TestVector { + name: "empty".into(), + vector: vec![], + }, + TestVector { + name: "single_item_single_element".into(), + vector: vec![vec![0]], + }, + TestVector { + name: "two_items_single_element".into(), + vector: vec![vec![0], vec![1]], + }, + TestVector { + name: "three_items_single_element".into(), + vector: vec![vec![0], vec![1], vec![1]], + }, + TestVector { + name: "single_item_multiple_element".into(), + vector: vec![vec![0, 1, 2]], + }, + TestVector { + name: "two_items_multiple_element".into(), + vector: vec![vec![0, 1, 2], vec![3, 4, 5]], + }, + TestVector { + name: "three_items_multiple_element".into(), + vector: vec![vec![0, 1, 2], vec![3, 4], vec![5, 6, 7, 8]], + }, + TestVector { + name: "empty_list_at_start".into(), + vector: vec![vec![], vec![3, 4], vec![5, 6, 7, 8]], + }, + TestVector { + name: "empty_list_at_middle".into(), + vector: vec![vec![0, 1, 2], vec![], vec![5, 6, 7, 8]], + }, + TestVector { + name: "empty_list_at_end".into(), + vector: vec![vec![0, 1, 2], vec![3, 4, 5], vec![]], + }, + TestVector { + name: "two_empty_lists".into(), + vector: vec![vec![], vec![]], + }, + TestVector { + name: "three_empty_lists".into(), + vector: vec![vec![], vec![], vec![]], + }, + ]; + + let mut rng = XorShiftRng::from_seed([42; 16]); + for i in 0..NUM_RANDOM_VECTORS { + let vector = Transactions::::random_for_test(&mut rng); + vectors.push(TestVector { + name: format!("random_vector_{i}"), + vector: vector.iter().map(|slice| slice.to_vec()).collect(), + }) + } + + Self { vectors } + } + } + + impl TestVectors { + fn iter( + &self, + ) -> impl Iterator, ReferenceTransactions)> + '_ + { + self.vectors.iter().map(|vector| { + let name = vector.name.clone(); + let transactions = Transactions::from(vector.vector.clone()); + + // Build a equivalent object using + // `VariableList>`. 
We can use this for + // reference testing + let mut reference = ReferenceTransactions::default(); + for tx in &vector.vector { + reference.push(tx.clone().into()).unwrap(); + } + + // Perform basic sanity checking against the reference. + assert_eq!(transactions.len(), reference.len()); + let mut transactions_iter = transactions.iter(); + let mut reference_iter = reference.iter(); + for _ in 0..transactions.len() { + assert_eq!( + transactions_iter.next().expect("not enough transactions"), + reference_iter + .next() + .expect("not enough reference txs") + .as_ref(), + "transaction not equal" + ); + } + assert!(transactions_iter.next().is_none(), "excess transactions"); + assert!(reference_iter.next().is_none(), "excess reference txs"); + drop((transactions_iter, reference_iter)); + + (name, transactions, reference) + }) + } + } + + #[test] + fn ssz() { + for (test, transactions, reference) in TestVectors::default().iter() { + assert_eq!( + transactions.ssz_bytes_len(), + reference.ssz_bytes_len(), + "{test} - ssz_bytes_len" + ); + assert_eq!( + transactions.as_ssz_bytes(), + reference.as_ssz_bytes(), + "{test} - serialization" + ); + assert_eq!( + transactions, + Transactions::from_ssz_bytes(&reference.as_ssz_bytes()).unwrap(), + "{test} - deserialization" + ) + } + } + + fn err_from_bytes(bytes: &[u8]) -> DecodeError { + Transactions::::from_ssz_bytes(bytes).unwrap_err() + } + + /// Helper to build invalid SSZ bytes. + #[derive(Default)] + struct InvalidSszBuilder { + ssz: Vec, + } + + impl InvalidSszBuilder { + // Append a 4-byte offset to self. + pub fn append_offset(mut self, index: usize) -> Self { + self.ssz.extend_from_slice(&encode_length(index)); + self + } + + // Append some misc bytes to self. + pub fn append_value(mut self, value: &[u8]) -> Self { + self.ssz.extend_from_slice(value); + self + } + + pub fn ssz(&self) -> &[u8] { + &self.ssz + } + } + + #[test] + fn ssz_malicious() { + // Highest offset that's still a divisor of 4. + let max_offset = u32::MAX as usize - 3; + + assert_eq!( + err_from_bytes(&[0]), + DecodeError::InvalidLengthPrefix { + len: 1, + expected: 4 + } + ); + assert_eq!( + err_from_bytes( + InvalidSszBuilder::default() + // This offset points to itself. Illegal. + .append_offset(0) + .ssz() + ), + DecodeError::InvalidListFixedBytesLen(0) + ); + assert_eq!( + err_from_bytes( + InvalidSszBuilder::default() + .append_offset(8) + // This offset points back to the first offset. Illegal. + .append_offset(0) + .ssz() + ), + DecodeError::OffsetIntoFixedPortion(0) + ); + assert_eq!( + err_from_bytes( + InvalidSszBuilder::default() + // This offset is far bigger than the SSZ buffer. Illegal. + .append_offset(max_offset) + .ssz() + ), + DecodeError::OffsetOutOfBounds(max_offset) + ); + assert!(matches!( + err_from_bytes( + InvalidSszBuilder::default() + .append_offset(8) + // This infers a really huge transaction. Illegal. + .append_offset(max_offset) + .append_value(&[0]) + .ssz() + ), + DecodeError::BytesInvalid(_) + )); + assert_eq!( + err_from_bytes( + InvalidSszBuilder::default() + .append_offset(8) + // This points outside of the given bytes. Illegal. 
+ .append_offset(9) + .ssz() + ), + DecodeError::OffsetOutOfBounds(1) + ); + } + + #[test] + fn tree_hash() { + for (test, transactions, reference) in TestVectors::default().iter() { + assert_eq!( + transactions.tree_hash_root(), + reference.tree_hash_root(), + "{test}" + ) + } + } + + #[derive(Serialize, Deserialize)] + #[serde(transparent)] + struct SerdeWrapper { + #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] + reference: ReferenceTransactions, + } + + #[test] + fn json() { + for (test, transactions, reference) in TestVectors::default().iter() { + let reference = SerdeWrapper { reference }; + + assert_eq!( + serde_json::to_string(&transactions).unwrap(), + serde_json::to_string(&reference).unwrap(), + "{test} - to json" + ); + + assert_eq!( + transactions, + serde_json::from_str(&serde_json::to_string(&reference).unwrap()).unwrap(), + "{test} - deserialize" + ); + } + } + + #[test] + fn yaml() { + for (test, transactions, reference) in TestVectors::default().iter() { + let reference = SerdeWrapper { reference }; + + assert_eq!( + serde_yaml::to_string(&transactions).unwrap(), + serde_yaml::to_string(&reference).unwrap(), + "{test} - to json" + ); + + assert_eq!( + transactions, + serde_yaml::from_str(&serde_yaml::to_string(&reference).unwrap()).unwrap(), + "{test} - deserialize" + ); + } + } +}
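For orientation (not part of the diff): a usage sketch of the new `Transactions` list, assuming the `types` crate API added above, with `MainnetEthSpec` supplying the length limits. The asserted byte layout is the standard SSZ encoding of a list of variable-length lists — a fixed-size offset section followed by the concatenated transaction bytes — which is also the representation `Transactions` stores internally:

```rust
use ssz::{Decode, Encode};
use types::{MainnetEthSpec, Transactions};

fn main() {
    let mut txs = Transactions::<MainnetEthSpec>::default();
    txs.push(&[0xde, 0xad]).unwrap();
    txs.push(&[0xbe, 0xef, 0x01]).unwrap();

    // `iter` yields `&[u8]` slices into the concatenated `bytes` buffer.
    assert_eq!(txs.iter().count(), 2);
    assert_eq!(txs.iter().next(), Some(&[0xde, 0xad][..]));

    // Two 4-byte little-endian offsets (8 and 10) followed by the tx bytes.
    let bytes = txs.as_ssz_bytes();
    assert_eq!(
        bytes,
        vec![8, 0, 0, 0, 10, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef, 0x01]
    );

    // Decoding round-trips without allocating per transaction.
    let decoded = Transactions::<MainnetEthSpec>::from_ssz_bytes(&bytes).unwrap();
    assert_eq!(txs, decoded);
}
```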
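Similarly, a standalone worked example (plain std Rust, not Lighthouse code) of the 40-byte fixed-length layout that the manual `Checkpoint` SSZ implementation earlier in this diff encodes and splits:

```rust
fn main() {
    let epoch: u64 = 3;
    let root = [0xab_u8; 32];

    // Encode: 8 bytes of little-endian epoch, then the 32-byte root,
    // exactly as the manual `ssz_append` does.
    let mut bytes = Vec::with_capacity(40);
    bytes.extend_from_slice(&epoch.to_le_bytes());
    bytes.extend_from_slice(&root);
    assert_eq!(bytes.len(), 40);

    // Decode: split at the epoch's fixed length, as `from_ssz_bytes` does.
    let (epoch_bytes, root_bytes) = bytes.split_at(8);
    let mut array = [0u8; 8];
    array.copy_from_slice(epoch_bytes);
    assert_eq!(u64::from_le_bytes(array), epoch);
    assert_eq!(root_bytes, &root);
}
```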