From ecff20f28918fa3de411b0c14ffb84ecf7a1ca3a Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Sun, 12 Jan 2025 21:05:56 +0530 Subject: [PATCH 01/15] use temp directory for test db --- Cargo.toml | 1 + src/primary/header_elector.rs | 37 +++++++++++++++++++++-------------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ab748c8..e27ab87 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,6 +31,7 @@ rocksdb = "0.22.0" byte-unit = "5.1.6" getset = "0.1.3" chrono = "0.4" +tempfile = "3.15.0" [dev-dependencies] rstest = "0.23.0" diff --git a/src/primary/header_elector.rs b/src/primary/header_elector.rs index 812a4f0..53ae7ef 100644 --- a/src/primary/header_elector.rs +++ b/src/primary/header_elector.rs @@ -143,7 +143,7 @@ mod test { identity::ed25519::{self, Keypair}, PeerId, }; - use rstest::rstest; + use tempfile::tempdir; use tokio::{ sync::{broadcast, mpsc, watch}, time::timeout, @@ -153,8 +153,9 @@ mod test { use crate::{ db::{Column, Db}, primary::test_utils::fixtures::{ - load_committee, random_digests, CHANNEL_CAPACITY, COMMITTEE_PATH, GENESIS_SEED, + random_digests, CHANNEL_CAPACITY, }, + settings::parser::Committee, types::{ block_header::BlockHeader, certificate::{Certificate, CertificateId}, @@ -178,18 +179,28 @@ mod test { mpsc::Receiver, Arc, CancellationToken, + tempfile::TempDir, ); - fn launch_header_elector(committee_path: String, db_path: &str) -> HeaderElectorFixutre { + async fn launch_header_elector() -> HeaderElectorFixutre { let (headers_tx, headers_rx) = broadcast::channel(CHANNEL_CAPACITY); let (round_tx, round_rx) = watch::channel((0, HashSet::new())); let (incomplete_headers_tx, incomplete_headers_rx) = mpsc::channel(CHANNEL_CAPACITY); let (network_tx, network_rx) = mpsc::channel(CHANNEL_CAPACITY); - let db = Arc::new(Db::new(db_path.into()).unwrap()); + + // Create a temporary directory for the test database + let temp_dir = tempdir().unwrap(); + let db_path = temp_dir.path().join("test.db"); + + let db = Arc::new(Db::new(db_path).unwrap()); + let validator_keypair = ed25519::Keypair::generate(); let token = CancellationToken::new(); let db_clone = db.clone(); let token_clone = token.clone(); + + let committee = Committee::new_test(); + tokio::spawn(async move { HeaderElector::spawn( token_clone, @@ -198,12 +209,13 @@ mod test { round_rx, validator_keypair, db_clone, - load_committee(&committee_path), + committee, incomplete_headers_tx, ) .await .unwrap() }); + ( headers_tx, round_tx, @@ -211,6 +223,7 @@ mod test { network_rx, db, token, + temp_dir, ) } @@ -259,21 +272,15 @@ mod test { let vote_status = vote.verify(&header_hash); assert!(vote_status.is_ok()); } - _ => { - assert!(false); - } + _ => assert!(false), } } #[tokio::test] - #[rstest] async fn test_first_round_valid_header_digests_stored() { - let (headers_tx, round_state_tx, _incomplete_headers_rx, network_rx, db, _) = - launch_header_elector( - COMMITTEE_PATH.into(), - "/tmp/test_first_round_valid_header_digests_stored_db", - ); - let genesis = Certificate::genesis(GENESIS_SEED); + let (headers_tx, round_state_tx, _incomplete_headers_rx, network_rx, db, _, _temp_dir) = + launch_header_elector().await; + let genesis = Certificate::genesis([0; 32]); let header = random_header(&[genesis.id()], 1); set_header_storage_in_db(&header, &db); set_certificates_in_db(&[genesis.clone()], &db); From 7ce75c8612cea22c1182a26ea1c3f5e0aa1b53d8 Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Sun, 12 Jan 2025 21:06:11 +0530 Subject: [PATCH 02/15] add header builder 
tests --- src/primary/tests/header_tests.rs | 314 ++++++++++++++++++++++++++++++ 1 file changed, 314 insertions(+) create mode 100644 src/primary/tests/header_tests.rs diff --git a/src/primary/tests/header_tests.rs b/src/primary/tests/header_tests.rs new file mode 100644 index 0000000..d15e3e2 --- /dev/null +++ b/src/primary/tests/header_tests.rs @@ -0,0 +1,314 @@ +use std::{collections::HashSet, sync::Arc, time::Duration}; + +use libp2p::identity::ed25519::Keypair; +use tokio::{ + sync::{broadcast, mpsc, watch}, + time::sleep, +}; +use tokio_util::sync::CancellationToken; + +use crate::{ + db::Db, + primary::header_builder::{wait_for_quorum, HeaderBuilder}, + settings::parser::Committee, + types::{ + batch::BatchId, + block_header::BlockHeader, + certificate::Certificate, + network::{NetworkRequest, ReceivedObject}, + sync::SyncStatus, + vote::Vote, + Round, + }, + utils::CircularBuffer, +}; + +#[tokio::test] +async fn test_header_builder_initialization() { + let (network_tx, _) = mpsc::channel(100); + let (certificate_tx, _) = mpsc::channel(100); + let keypair = Keypair::generate(); + let db = Arc::new(Db::new_in_memory().await.unwrap()); + let (header_trigger_tx, header_trigger_rx) = watch::channel((0, HashSet::new())); + let (votes_tx, votes_rx) = broadcast::channel(100); + let digests_buffer = Arc::new(tokio::sync::Mutex::new(CircularBuffer::new(100))); + let committee = Committee::new_test(); + let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); + + let _header_builder = HeaderBuilder { + network_tx, + certificate_tx, + keypair, + _db: db, + header_trigger_rx, + votes_rx, + digests_buffer, + committee, + sync_status_rx, + }; +} + +#[tokio::test] +async fn test_wait_for_quorum() { + let keypair = Keypair::generate(); + let (votes_tx, mut votes_rx) = broadcast::channel(100); + let header = BlockHeader::new_test(); + let threshold = 2; + + // send votes + tokio::spawn(async move { + for i in 0..threshold { + let vote = Vote::new_test(); + let received = ReceivedObject { + object: vote, + peer_id: format!("peer_{}", i), + }; + votes_tx.send(received).unwrap(); + sleep(Duration::from_millis(100)).await; + } + }); + + // Wait for quorum + let result = wait_for_quorum(&header, threshold, &mut votes_rx, &keypair).await; + assert!(result.is_ok()); + let votes = result.unwrap(); + assert_eq!(votes.len(), threshold); +} + +#[tokio::test] +async fn test_header_builder_sync_status() { + let (network_tx, _) = mpsc::channel(100); + let (certificate_tx, _) = mpsc::channel(100); + let keypair = Keypair::generate(); + let db = Arc::new(Db::new_in_memory().await.unwrap()); + let (header_trigger_tx, header_trigger_rx) = watch::channel((0, HashSet::new())); + let (votes_tx, votes_rx) = broadcast::channel(100); + let digests_buffer = Arc::new(tokio::sync::Mutex::new(CircularBuffer::new(100))); + let committee = Committee::new_test(); + let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Incomplete); + + let header_builder = HeaderBuilder { + network_tx, + certificate_tx, + keypair, + _db: db, + header_trigger_rx, + votes_rx, + digests_buffer, + committee, + sync_status_rx, + }; + + // run header builder + let handle = tokio::spawn(async move { + header_builder.run().await.unwrap(); + }); + + // Test that header builder waits for sync to complete + sleep(Duration::from_millis(100)).await; + sync_status_tx.send(SyncStatus::Complete).unwrap(); + + handle.abort(); +} + +#[tokio::test] +async fn test_header_builder_with_empty_digests() { + let (network_tx, _) = 
mpsc::channel(100); + let (certificate_tx, _) = mpsc::channel(100); + let keypair = Keypair::generate(); + let db = Arc::new(Db::new_in_memory().await.unwrap()); + let (header_trigger_tx, header_trigger_rx) = watch::channel((0, HashSet::new())); + let (votes_tx, votes_rx) = broadcast::channel(100); + let digests_buffer = Arc::new(tokio::sync::Mutex::new(CircularBuffer::new(100))); + let committee = Committee::new_test(); + let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); + + let header_builder = HeaderBuilder { + network_tx, + certificate_tx, + keypair, + _db: db, + header_trigger_rx, + votes_rx, + digests_buffer, + committee, + sync_status_rx, + }; + + // run header builder + let handle = tokio::spawn(async move { + header_builder.run().await.unwrap(); + }); + + // trigger header building with empty digests + header_trigger_tx.send((1, HashSet::new())).unwrap(); + sleep(Duration::from_millis(100)).await; + + handle.abort(); +} + +#[tokio::test] +async fn test_header_builder_multiple_rounds() { + let (network_tx, _) = mpsc::channel(100); + let (certificate_tx, mut cert_rx) = mpsc::channel(100); + let keypair = Keypair::generate(); + let db = Arc::new(Db::new_in_memory().await.unwrap()); + let (header_trigger_tx, header_trigger_rx) = watch::channel((0, HashSet::new())); + let (votes_tx, votes_rx) = broadcast::channel(100); + let digests_buffer = Arc::new(tokio::sync::Mutex::new(CircularBuffer::new(100))); + let committee = Committee::new_test(); + let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); + + let header_builder = HeaderBuilder { + network_tx, + certificate_tx, + keypair, + _db: db, + header_trigger_rx, + votes_rx, + digests_buffer, + committee, + sync_status_rx, + }; + + // run header builder + let handle = tokio::spawn(async move { + header_builder.run().await.unwrap(); + }); + + // trigger multiple rounds + for round in 1..=3 { + let mut certs = HashSet::new(); + certs.insert(Certificate::genesis([round as u8; 32])); + header_trigger_tx.send((round, certs)).unwrap(); + sleep(Duration::from_millis(100)).await; + } + + handle.abort(); +} + +#[tokio::test] +async fn test_header_builder_quorum_timeout() { + let (network_tx, _) = mpsc::channel(100); + let (certificate_tx, _) = mpsc::channel(100); + let keypair = Keypair::generate(); + let db = Arc::new(Db::new_in_memory().await.unwrap()); + let (header_trigger_tx, header_trigger_rx) = watch::channel((0, HashSet::new())); + let (votes_tx, votes_rx) = broadcast::channel(100); + let digests_buffer = Arc::new(tokio::sync::Mutex::new(CircularBuffer::new(100))); + let committee = Committee::new_test(); + let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); + + let header_builder = HeaderBuilder { + network_tx, + certificate_tx, + keypair, + _db: db, + header_trigger_rx, + votes_rx, + digests_buffer, + committee, + sync_status_rx, + }; + + // run header builder + let handle = tokio::spawn(async move { + header_builder.run().await.unwrap(); + }); + + // trigger header building but don't send any votes + let mut certs = HashSet::new(); + certs.insert(Certificate::genesis([0; 32])); + header_trigger_tx.send((1, certs)).unwrap(); + + // wait for timeout duration + sleep(Duration::from_millis(110)).await; + + handle.abort(); +} + +#[tokio::test] +async fn test_header_builder_invalid_votes() { + let (network_tx, _) = mpsc::channel(100); + let (certificate_tx, _) = mpsc::channel(100); + let keypair = Keypair::generate(); + let db = 
Arc::new(Db::new_in_memory().await.unwrap()); + let (header_trigger_tx, header_trigger_rx) = watch::channel((0, HashSet::new())); + let (votes_tx, votes_rx) = broadcast::channel(100); + let digests_buffer = Arc::new(tokio::sync::Mutex::new(CircularBuffer::new(100))); + let committee = Committee::new_test(); + let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); + + let header_builder = HeaderBuilder { + network_tx, + certificate_tx, + keypair, + _db: db, + header_trigger_rx, + votes_rx, + digests_buffer, + committee, + sync_status_rx, + }; + + // run header builder + let handle = tokio::spawn(async move { + header_builder.run().await.unwrap(); + }); + + // send invalid votes + let invalid_vote = Vote::new_test_invalid(); + votes_tx.send(ReceivedObject::new(invalid_vote, "peer1".to_string())).unwrap(); + + sleep(Duration::from_millis(10)).await; + handle.abort(); +} + +#[tokio::test] +async fn test_header_builder_digest_buffer() { + let (network_tx, _) = mpsc::channel(100); + let (certificate_tx, _) = mpsc::channel(100); + let keypair = Keypair::generate(); + let db = Arc::new(Db::new_in_memory().await.unwrap()); + let (header_trigger_tx, header_trigger_rx) = watch::channel((0, HashSet::new())); + let (votes_tx, votes_rx) = broadcast::channel(100); + let digests_buffer = Arc::new(tokio::sync::Mutex::new(CircularBuffer::new(2))); // Small buffer + let committee = Committee::new_test(); + let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); + + let header_builder = HeaderBuilder { + network_tx, + certificate_tx, + keypair, + _db: db, + header_trigger_rx, + votes_rx, + digests_buffer: digests_buffer.clone(), + committee, + sync_status_rx, + }; + + // Run header builder in background + let handle = tokio::spawn(async move { + header_builder.run().await.unwrap(); + }); + + // Add digests to buffer + { + let mut buffer = digests_buffer.lock().await; + buffer.push(BatchId::new([1; 32])); + buffer.push(BatchId::new([2; 32])); + // This should overwrite the first digest + buffer.push(BatchId::new([3; 32])); + } + + // Verify buffer state + { + let buffer = digests_buffer.lock().await; + assert_eq!(buffer.len(), 2); + assert_eq!(buffer.as_slice()[0], BatchId::new([2; 32])); + assert_eq!(buffer.as_slice()[1], BatchId::new([3; 32])); + } + + handle.abort(); +} \ No newline at end of file From 7e74f8081af33d740e78ff1233f1795e8a16f2b5 Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Sun, 12 Jan 2025 21:06:42 +0530 Subject: [PATCH 03/15] new test authority --- src/settings/parser.rs | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/src/settings/parser.rs b/src/settings/parser.rs index 4885573..dfb7c35 100644 --- a/src/settings/parser.rs +++ b/src/settings/parser.rs @@ -1,4 +1,4 @@ -use libp2p::PeerId; +use libp2p::{identity::{self, ed25519}, PeerId}; use serde::{Deserialize, Serialize}; use std::path::Path; @@ -37,9 +37,33 @@ impl Committee { } ((self.authorities.len() / 3) * 2 + 1) as u32 } + pub fn has_authority_id(&self, peer_id: &PeerId) -> bool { self.authorities.iter().any(|a| &a.authority_id == peer_id) } + + #[cfg(test)] + pub fn new_test() -> Self { + let mut authorities = Vec::new(); + + // Add three test authorities + for i in 0..3 { + let keypair = ed25519::Keypair::generate(); + let public_key = identity::PublicKey::from(keypair.public()); + let peer_id = PeerId::from_public_key(&public_key); + + let authority = AuthorityInfo { + authority_id: peer_id, + authority_pubkey: 
hex::encode(keypair.public().to_bytes()),
+                primary_address: ("127.0.0.1".to_string(), format!("800{}", i)),
+                stake: 1,
+                workers_addresses: vec![("127.0.0.1".to_string(), format!("900{}", i))],
+            };
+            authorities.push(authority);
+        }
+
+        Committee { authorities }
+    }
 }
 
 #[derive(Clone, Debug, Deserialize, Serialize)]

From d137d0d270b387e4db79c250487674444826c1fd Mon Sep 17 00:00:00 2001
From: Harsh Pratap Singh
Date: Sun, 12 Jan 2025 21:06:53 +0530
Subject: [PATCH 04/15] add fetcher test

---
 src/synchronizer/tests/fetcher_tests.rs | 216 ++++++++++++++++++++++++
 1 file changed, 216 insertions(+)
 create mode 100644 src/synchronizer/tests/fetcher_tests.rs

diff --git a/src/synchronizer/tests/fetcher_tests.rs b/src/synchronizer/tests/fetcher_tests.rs
new file mode 100644
index 0000000..faca3c2
--- /dev/null
+++ b/src/synchronizer/tests/fetcher_tests.rs
@@ -0,0 +1,216 @@
+use std::collections::HashSet;
+use tokio::sync::{broadcast, mpsc};
+
+use crate::{
+    synchronizer::{
+        fetcher::Fetcher,
+        traits::{DataProvider, Fetch},
+        FetchError, RequestedObject,
+    },
+    types::{
+        network::{NetworkRequest, ReceivedObject, RequestPayload, SyncRequest, SyncResponse},
+        traits::{AsBytes, Hash},
+    },
+};
+
+#[derive(Clone, Debug)]
+struct TestFetchable {
+    id: String,
+    data: Vec<u8>,
+}
+
+impl Hash for TestFetchable {
+    fn hash(&self) -> String {
+        self.id.clone()
+    }
+}
+
+impl AsBytes for TestFetchable {
+    fn bytes(&self) -> Vec<u8> {
+        self.data.clone()
+    }
+}
+
+struct TestDataProvider {
+    peers: Vec<String>,
+}
+
+impl DataProvider for TestDataProvider {
+    fn peers(&self) -> Vec<String> {
+        self.peers.clone()
+    }
+}
+
+#[tokio::test]
+async fn test_fetcher_empty() {
+    let (requests_tx, _) = mpsc::channel(100);
+    let (_, responses_rx) = broadcast::channel(100);
+
+    let mut fetcher = Fetcher::new();
+    let result = fetcher.run(requests_tx, responses_rx).await;
+    assert!(result.is_ok());
+}
+
+#[tokio::test]
+async fn test_fetcher_single_request() {
+    let (requests_tx, mut requests_rx) = mpsc::channel(100);
+    let (responses_tx, responses_rx) = broadcast::channel(100);
+
+    let test_data = TestFetchable {
+        id: "test1".to_string(),
+        data: vec![1, 2, 3],
+    };
+
+    let provider = Box::new(TestDataProvider {
+        peers: vec!["peer1".to_string()],
+    });
+
+    let requested_object = RequestedObject {
+        object: test_data.clone(),
+        source: provider,
+    };
+
+    let mut fetcher = Fetcher::new();
+    fetcher.push(Box::new(requested_object));
+
+    // run fetcher
+    let handle = tokio::spawn(async move {
+        fetcher.run(requests_tx, responses_rx).await.unwrap();
+    });
+
+    // verify request is sent
+    let request = requests_rx.recv().await.unwrap();
+    match request {
+        NetworkRequest::Sync { request, peer_id } => {
+            assert_eq!(peer_id, "peer1");
+        }
+        _ => panic!("Expected sync request"),
+    }
+
+    // send response
+    let response = SyncResponse::Success(RequestPayload::Data(test_data.bytes()));
+    let received = ReceivedObject {
+        object: response,
+        peer_id: "peer1".to_string(),
+    };
+    responses_tx.send(received).unwrap();
+
+    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+    handle.abort();
+}
+
+#[tokio::test]
+async fn test_fetcher_timeout() {
+    let (requests_tx, _) = mpsc::channel(100);
+    let (_, responses_rx) = broadcast::channel(100);
+
+    let test_data = TestFetchable {
+        id: "test1".to_string(),
+        data: vec![1, 2, 3],
+    };
+
+    let provider = Box::new(TestDataProvider {
+        peers: vec!["peer1".to_string()],
+    });
+
+    let requested_object = RequestedObject {
+        object: test_data,
+        source: provider,
+    };
+
+    let mut fetcher =
Fetcher::new(); + fetcher.push(Box::new(requested_object)); + + // Run fetcher with very short timeout + tokio::time::timeout( + tokio::time::Duration::from_millis(50), + fetcher.run(requests_tx, responses_rx) + ).await.unwrap_err(); +} + +#[tokio::test] +async fn test_fetcher_error_response() { + let (requests_tx, _) = mpsc::channel(100); + let (responses_tx, responses_rx) = broadcast::channel(100); + + let test_data = TestFetchable { + id: "test1".to_string(), + data: vec![1, 2, 3], + }; + + let provider = Box::new(TestDataProvider { + peers: vec!["peer1".to_string()], + }); + + let requested_object = RequestedObject { + object: test_data, + source: provider, + }; + + let mut fetcher = Fetcher::new(); + fetcher.push(Box::new(requested_object)); + + // run fetcher + let handle = tokio::spawn(async move { + fetcher.run(requests_tx, responses_rx).await.unwrap(); + }); + + // send error response + let response = SyncResponse::Error("test error".to_string()); + let received = ReceivedObject { + object: response, + peer_id: "peer1".to_string(), + }; + responses_tx.send(received).unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + handle.abort(); +} + +#[tokio::test] +async fn test_fetcher_multiple_peers() { + let (requests_tx, mut requests_rx) = mpsc::channel(100); + let (responses_tx, responses_rx) = broadcast::channel(100); + + let test_data = TestFetchable { + id: "test1".to_string(), + data: vec![1, 2, 3], + }; + + let provider = Box::new(TestDataProvider { + peers: vec!["peer1".to_string(), "peer2".to_string(), "peer3".to_string()], + }); + + let requested_object = RequestedObject { + object: test_data.clone(), + source: provider, + }; + + let mut fetcher = Fetcher::new(); + fetcher.push(Box::new(requested_object)); + + // run fetcher + let handle = tokio::spawn(async move { + fetcher.run(requests_tx, responses_rx).await.unwrap(); + }); + + // verify first request is sent + let request = requests_rx.recv().await.unwrap(); + match request { + NetworkRequest::Sync { request, peer_id } => { + assert!(["peer1", "peer2", "peer3"].contains(&peer_id.as_str())); + } + _ => panic!("Expected sync request"), + } + + // send successful response from one peer + let response = SyncResponse::Success(RequestPayload::Data(test_data.bytes())); + let received = ReceivedObject { + object: response, + peer_id: "peer2".to_string(), + }; + responses_tx.send(received).unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + handle.abort(); +} \ No newline at end of file From 4d60c2302115fc3f282bf071cb85c98c7f08b851 Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Sun, 12 Jan 2025 21:07:09 +0530 Subject: [PATCH 05/15] add Primary synchronizer tests --- src/synchronizer/tests/synchronizer_tests.rs | 403 +++++++++++++++++++ 1 file changed, 403 insertions(+) create mode 100644 src/synchronizer/tests/synchronizer_tests.rs diff --git a/src/synchronizer/tests/synchronizer_tests.rs b/src/synchronizer/tests/synchronizer_tests.rs new file mode 100644 index 0000000..1a5dd9c --- /dev/null +++ b/src/synchronizer/tests/synchronizer_tests.rs @@ -0,0 +1,403 @@ +use std::{collections::HashSet, sync::Arc}; + +use tokio::sync::{broadcast, mpsc}; + +use crate::{ + synchronizer::{ + fetcher::Fetcher, + traits::{DataProvider, Fetch}, + RequestedObject, + }, + types::{ + network::{NetworkRequest, ReceivedObject, RequestPayload, SyncRequest, SyncResponse}, + traits::{AsBytes, Hash}, + }, +}; + +#[derive(Clone, Debug)] +struct MockData { + id: String, +} + +impl Hash for 
MockData {
+    fn hash(&self) -> String {
+        self.id.clone()
+    }
+}
+
+impl AsBytes for MockData {
+    fn bytes(&self) -> Vec<u8> {
+        self.id.as_bytes().to_vec()
+    }
+}
+
+struct MockDataProvider {
+    peers: Vec<String>,
+}
+
+impl DataProvider for MockDataProvider {
+    fn peers(&self) -> Vec<String> {
+        self.peers.clone()
+    }
+}
+
+#[tokio::test]
+async fn test_fetcher_basic() {
+    let (requests_tx, mut requests_rx) = mpsc::channel(100);
+    let (responses_tx, responses_rx) = broadcast::channel(100);
+
+    // create mock data and provider
+    let mock_data = MockData {
+        id: "test_data".to_string(),
+    };
+    let mock_provider = Box::new(MockDataProvider {
+        peers: vec!["peer1".to_string(), "peer2".to_string()],
+    });
+
+    let requested_object = RequestedObject {
+        object: mock_data,
+        source: mock_provider,
+    };
+
+    // create and start fetcher
+    let mut fetcher = Fetcher::new();
+    fetcher.push(Box::new(requested_object));
+
+    // run fetcher
+    let handle = tokio::spawn(async move {
+        fetcher.run(requests_tx, responses_rx).await.unwrap();
+    });
+
+    // verify request is sent
+    let request = requests_rx.recv().await.unwrap();
+    match request {
+        NetworkRequest::Sync { request, peer_id } => {
+            assert!(peer_id == "peer1" || peer_id == "peer2");
+        }
+        _ => panic!("Expected sync request"),
+    }
+
+    handle.abort();
+}
+
+#[tokio::test]
+async fn test_fetcher_response_handling() {
+    let (requests_tx, _) = mpsc::channel(100);
+    let (responses_tx, responses_rx) = broadcast::channel(100);
+
+    // create mock data and provider
+    let mock_data = MockData {
+        id: "test_data".to_string(),
+    };
+    let mock_provider = Box::new(MockDataProvider {
+        peers: vec!["peer1".to_string()],
+    });
+
+    let requested_object = RequestedObject {
+        object: mock_data.clone(),
+        source: mock_provider,
+    };
+
+    // create and start fetcher
+    let mut fetcher = Fetcher::new();
+    fetcher.push(Box::new(requested_object));
+
+    // run fetcher
+    let handle = tokio::spawn(async move {
+        fetcher.run(requests_tx, responses_rx).await.unwrap();
+    });
+
+    // send mock response
+    let response = SyncResponse::Success(RequestPayload::Data(mock_data.bytes()));
+    let received_response = ReceivedObject {
+        object: response,
+        peer_id: "peer1".to_string(),
+    };
+    responses_tx.send(received_response).unwrap();
+
+    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+
+    handle.abort();
+}
+
+#[tokio::test]
+async fn test_fetcher_multiple_objects() {
+    let (requests_tx, mut requests_rx) = mpsc::channel(100);
+    let (responses_tx, responses_rx) = broadcast::channel(100);
+
+    let mock_data1 = MockData {
+        id: "test_data1".to_string(),
+    };
+    let mock_data2 = MockData {
+        id: "test_data2".to_string(),
+    };
+
+    let mock_provider = Box::new(MockDataProvider {
+        peers: vec!["peer1".to_string()],
+    });
+
+    let requested_object1 = RequestedObject {
+        object: mock_data1,
+        source: mock_provider.clone(),
+    };
+    let requested_object2 = RequestedObject {
+        object: mock_data2,
+        source: mock_provider,
+    };
+
+    // create and start fetcher
+    let mut fetcher = Fetcher::new();
+    fetcher.push(Box::new(requested_object1));
+    fetcher.push(Box::new(requested_object2));
+
+    // run fetcher
+    let handle = tokio::spawn(async move {
+        fetcher.run(requests_tx, responses_rx).await.unwrap();
+    });
+
+    // verify multiple requests are sent
+    let request1 = requests_rx.recv().await.unwrap();
+    let request2 = requests_rx.recv().await.unwrap();
+
+    assert!(matches!(request1, NetworkRequest::Sync { .. }));
+    assert!(matches!(request2, NetworkRequest::Sync { ..
})); + + handle.abort(); +} + +#[tokio::test] +async fn test_synchronizer_concurrent_requests() { + let (requests_tx, mut requests_rx) = mpsc::channel(100); + let (responses_tx, responses_rx) = broadcast::channel(100); + + // create multiple mock objects with different providers + let mock_data1 = MockData { + id: "data1".to_string(), + }; + let mock_data2 = MockData { + id: "data2".to_string(), + }; + let mock_data3 = MockData { + id: "data3".to_string(), + }; + + let provider1 = Box::new(MockDataProvider { + peers: vec!["peer1".to_string()], + }); + let provider2 = Box::new(MockDataProvider { + peers: vec!["peer2".to_string()], + }); + let provider3 = Box::new(MockDataProvider { + peers: vec!["peer3".to_string()], + }); + + let mut fetcher = Fetcher::new(); + fetcher.push(Box::new(RequestedObject { + object: mock_data1.clone(), + source: provider1, + })); + fetcher.push(Box::new(RequestedObject { + object: mock_data2.clone(), + source: provider2, + })); + fetcher.push(Box::new(RequestedObject { + object: mock_data3.clone(), + source: provider3, + })); + + // run fetcher + let handle = tokio::spawn(async move { + fetcher.run(requests_tx, responses_rx).await.unwrap(); + }); + + // collect all requests + let mut received_requests = HashSet::new(); + for _ in 0..3 { + if let NetworkRequest::Sync { request: _, peer_id } = requests_rx.recv().await.unwrap() { + received_requests.insert(peer_id); + } + } + + assert_eq!(received_requests.len(), 3); + assert!(received_requests.contains("peer1")); + assert!(received_requests.contains("peer2")); + assert!(received_requests.contains("peer3")); + + handle.abort(); +} + +#[tokio::test] +async fn test_synchronizer_retry_on_failure() { + let (requests_tx, mut requests_rx) = mpsc::channel(100); + let (responses_tx, responses_rx) = broadcast::channel(100); + + let mock_data = MockData { + id: "test_data".to_string(), + }; + + let provider = Box::new(MockDataProvider { + peers: vec!["peer1".to_string(), "peer2".to_string()], + }); + + let requested_object = RequestedObject { + object: mock_data.clone(), + source: provider, + }; + + let mut fetcher = Fetcher::new(); + fetcher.push(Box::new(requested_object)); + + // run fetcher + let handle = tokio::spawn(async move { + fetcher.run(requests_tx, responses_rx).await.unwrap(); + }); + + // get first request + let request1 = requests_rx.recv().await.unwrap(); + match request1 { + NetworkRequest::Sync { request: _, peer_id } => { + // send error response + let error_response = SyncResponse::Error("test error".to_string()); + responses_tx + .send(ReceivedObject { + object: error_response, + peer_id: peer_id.clone(), + }) + .unwrap(); + } + _ => panic!("Expected sync request"), + } + + // should get another request to different peer + let request2 = requests_rx.recv().await.unwrap(); + match request2 { + NetworkRequest::Sync { request: _, peer_id } => { + assert_ne!( + peer_id, + match request1 { + NetworkRequest::Sync { peer_id, .. 
} => peer_id, + _ => panic!("Expected sync request"), + } + ); + } + _ => panic!("Expected sync request"), + } + + handle.abort(); +} + +#[tokio::test] +async fn test_synchronizer_partial_response() { + let (requests_tx, mut requests_rx) = mpsc::channel(100); + let (responses_tx, responses_rx) = broadcast::channel(100); + + let mock_data1 = MockData { + id: "data1".to_string(), + }; + let mock_data2 = MockData { + id: "data2".to_string(), + }; + + let provider = Box::new(MockDataProvider { + peers: vec!["peer1".to_string()], + }); + + let mut fetcher = Fetcher::new(); + fetcher.push(Box::new(RequestedObject { + object: mock_data1.clone(), + source: provider.clone(), + })); + fetcher.push(Box::new(RequestedObject { + object: mock_data2.clone(), + source: provider, + })); + + // run fetcher + let handle = tokio::spawn(async move { + fetcher.run(requests_tx, responses_rx).await.unwrap(); + }); + + // get first request + let request = requests_rx.recv().await.unwrap(); + match request { + NetworkRequest::Sync { request: _, peer_id } => { + // Send partial success response + let response = SyncResponse::Success(RequestPayload::Data(mock_data1.bytes())); + responses_tx + .send(ReceivedObject { + object: response, + peer_id, + }) + .unwrap(); + } + _ => panic!("Expected sync request"), + } + + // Should get another request for remaining data + let request = requests_rx.recv().await.unwrap(); + match request { + NetworkRequest::Sync { request: _, peer_id } => { + // Send success response for second object + let response = SyncResponse::Success(RequestPayload::Data(mock_data2.bytes())); + responses_tx + .send(ReceivedObject { + object: response, + peer_id, + }) + .unwrap(); + } + _ => panic!("Expected sync request"), + } + + handle.abort(); +} + +#[tokio::test] +async fn test_synchronizer_invalid_response_data() { + let (requests_tx, mut requests_rx) = mpsc::channel(100); + let (responses_tx, responses_rx) = broadcast::channel(100); + + let mock_data = MockData { + id: "test_data".to_string(), + }; + + let provider = Box::new(MockDataProvider { + peers: vec!["peer1".to_string(), "peer2".to_string()], + }); + + let requested_object = RequestedObject { + object: mock_data.clone(), + source: provider, + }; + + let mut fetcher = Fetcher::new(); + fetcher.push(Box::new(requested_object)); + + // run fetcher + let handle = tokio::spawn(async move { + fetcher.run(requests_tx, responses_rx).await.unwrap(); + }); + + // get request + let request = requests_rx.recv().await.unwrap(); + match request { + NetworkRequest::Sync { request: _, peer_id } => { + // Send invalid response data + let invalid_data = vec![0, 1, 2]; // Different from what was requested + let response = SyncResponse::Success(RequestPayload::Data(invalid_data)); + responses_tx + .send(ReceivedObject { + object: response, + peer_id, + }) + .unwrap(); + } + _ => panic!("Expected sync request"), + } + + // should get another request due to invalid response + let request = requests_rx.recv().await.unwrap(); + assert!(matches!(request, NetworkRequest::Sync { .. 
}));
+
+    handle.abort();
+}
\ No newline at end of file

From dbdcf637a3fe2f99f6c48a1fbd5debcbe0cdc88d Mon Sep 17 00:00:00 2001
From: Harsh Pratap Singh
Date: Sun, 12 Jan 2025 21:07:19 +0530
Subject: [PATCH 06/15] add dag tests

---
 src/types/tests/dag_tests.rs | 201 +++++++++++++++++++++++
 1 file changed, 201 insertions(+)
 create mode 100644 src/types/tests/dag_tests.rs

diff --git a/src/types/tests/dag_tests.rs b/src/types/tests/dag_tests.rs
new file mode 100644
index 0000000..a7e1af9
--- /dev/null
+++ b/src/types/tests/dag_tests.rs
@@ -0,0 +1,201 @@
+use std::collections::HashSet;
+
+use crate::types::{
+    dag::{Dag, DagError, Vertex},
+    traits::{AsBytes, Hash},
+};
+
+#[derive(Clone, Debug)]
+struct TestData {
+    value: u64,
+}
+
+impl Hash for TestData {
+    fn hash(&self) -> String {
+        format!("test_data_{}", self.value)
+    }
+}
+
+impl AsBytes for TestData {
+    fn bytes(&self) -> Vec<u8> {
+        self.value.to_be_bytes().to_vec()
+    }
+}
+
+#[tokio::test]
+async fn test_dag_creation_and_basic_ops() {
+    let base_layer = 0;
+    let mut dag: Dag<TestData> = Dag::new(base_layer);
+
+    // Test initial state
+    assert_eq!(dag.height(), base_layer);
+    assert_eq!(dag.base_layer(), base_layer);
+
+    // Test vertex insertion
+    let data = TestData { value: 1 };
+    let parents = HashSet::new();
+    let vertex = Vertex::from_data(data, 1, parents);
+
+    dag.insert(vertex.clone()).unwrap();
+    assert_eq!(dag.height(), 1);
+    assert_eq!(dag.layer_size(1), 1);
+}
+
+#[tokio::test]
+async fn test_dag_parent_child_relationships() {
+    let mut dag: Dag<TestData> = Dag::new(0);
+
+    // Create parent vertex
+    let parent_data = TestData { value: 1 };
+    let parent_vertex = Vertex::from_data(parent_data, 1, HashSet::new());
+    let parent_id = parent_vertex.id().clone();
+    dag.insert(parent_vertex).unwrap();
+
+    // Create child vertex with parent reference
+    let mut parents = HashSet::new();
+    parents.insert(parent_id);
+    let child_data = TestData { value: 2 };
+    let child_vertex = Vertex::from_data(child_data, 2, parents);
+
+    dag.insert_checked(child_vertex).unwrap();
+}
+
+#[tokio::test]
+async fn test_dag_invalid_parent() {
+    let mut dag: Dag<TestData> = Dag::new(0);
+
+    let mut parents = HashSet::new();
+    parents.insert("non_existent_parent".to_string());
+    let data = TestData { value: 1 };
+    let vertex = Vertex::from_data(data, 1, parents);
+
+    match dag.insert_checked(vertex) {
+        Err(DagError::MissingParents(_)) => (),
+        _ => panic!("Expected MissingParents error"),
+    }
+}
+
+#[tokio::test]
+async fn test_dag_layer_operations() {
+    let mut dag: Dag<TestData> = Dag::new(0);
+
+    // Insert vertices in different layers
+    for i in 1..=3 {
+        let data = TestData { value: i };
+        let vertex = Vertex::from_data(data, i as u64, HashSet::new());
+        dag.insert(vertex).unwrap();
+    }
+
+    // Test layer queries
+    assert_eq!(dag.layer_size(1), 1);
+    assert_eq!(dag.layer_size(2), 1);
+    assert_eq!(dag.layer_size(3), 1);
+
+    let layer_2_vertices = dag.layer_vertices(2);
+    assert_eq!(layer_2_vertices.len(), 1);
+    assert_eq!(layer_2_vertices[0].data().value, 2);
+}
+
+#[tokio::test]
+async fn test_dag_multiple_parents() {
+    let mut dag: Dag<TestData> = Dag::new(0);
+
+    // Create two parent vertices
+    let parent1_data = TestData { value: 1 };
+    let parent2_data = TestData { value: 2 };
+    let parent1_vertex = Vertex::from_data(parent1_data, 1, HashSet::new());
+    let parent2_vertex = Vertex::from_data(parent2_data, 1, HashSet::new());
+
+    let parent1_id = parent1_vertex.id().clone();
+    let parent2_id = parent2_vertex.id().clone();
+
+    dag.insert(parent1_vertex).unwrap();
+    dag.insert(parent2_vertex).unwrap();
+
+    // Create child with multiple parents
+    let mut parents = HashSet::new();
+    parents.insert(parent1_id);
+    parents.insert(parent2_id);
+
+    let child_data = TestData { value: 3 };
+    let child_vertex = Vertex::from_data(child_data, 2, parents);
+
+    dag.insert_checked(child_vertex).unwrap();
+    assert_eq!(dag.layer_size(2), 1);
+}
+
+#[tokio::test]
+async fn test_dag_vertex_retrieval() {
+    let mut dag: Dag<TestData> = Dag::new(0);
+
+    let data = TestData { value: 1 };
+    let vertex = Vertex::from_data(data, 1, HashSet::new());
+    let vertex_id = vertex.id().clone();
+
+    dag.insert(vertex).unwrap();
+
+    let retrieved = dag.get_vertex(&vertex_id).unwrap();
+    assert_eq!(retrieved.data().value, 1);
+    assert_eq!(retrieved.layer(), 1);
+}
+
+#[tokio::test]
+async fn test_dag_cyclic_insertion_prevention() {
+    let mut dag: Dag<TestData> = Dag::new(0);
+
+    // Create first vertex
+    let data1 = TestData { value: 1 };
+    let vertex1 = Vertex::from_data(data1, 1, HashSet::new());
+    let vertex1_id = vertex1.id().clone();
+    dag.insert(vertex1).unwrap();
+
+    // Try to create a vertex in a lower layer referencing a higher layer
+    let mut parents = HashSet::new();
+    parents.insert(vertex1_id);
+    let data2 = TestData { value: 2 };
+    let vertex2 = Vertex::from_data(data2, 0, parents);
+
+    assert!(dag.insert_checked(vertex2).is_err());
+}
+
+#[tokio::test]
+async fn test_dag_complex_hierarchy() {
+    let mut dag: Dag<TestData> = Dag::new(0);
+
+    // Layer 1: Two vertices
+    let vertex1_1 = Vertex::from_data(TestData { value: 11 }, 1, HashSet::new());
+    let vertex1_2 = Vertex::from_data(TestData { value: 12 }, 1, HashSet::new());
+    let id1_1 = vertex1_1.id().clone();
+    let id1_2 = vertex1_2.id().clone();
+
+    dag.insert(vertex1_1).unwrap();
+    dag.insert(vertex1_2).unwrap();
+
+    // Layer 2: Two vertices, each with one parent
+    let mut parents2_1 = HashSet::new();
+    parents2_1.insert(id1_1.clone());
+    let mut parents2_2 = HashSet::new();
+    parents2_2.insert(id1_2.clone());
+
+    let vertex2_1 = Vertex::from_data(TestData { value: 21 }, 2, parents2_1);
+    let vertex2_2 = Vertex::from_data(TestData { value: 22 }, 2, parents2_2);
+    let id2_1 = vertex2_1.id().clone();
+    let id2_2 = vertex2_2.id().clone();
+
+    dag.insert_checked(vertex2_1).unwrap();
+    dag.insert_checked(vertex2_2).unwrap();
+
+    // Layer 3: One vertex with both layer 2 vertices as parents
+    let mut parents3 = HashSet::new();
+    parents3.insert(id2_1);
+    parents3.insert(id2_2);
+
+    let vertex3 = Vertex::from_data(TestData { value: 31 }, 3, parents3);
+    dag.insert_checked(vertex3).unwrap();
+
+    // Verify the structure
+    assert_eq!(dag.layer_size(1), 2);
+    assert_eq!(dag.layer_size(2), 2);
+    assert_eq!(dag.layer_size(3), 1);
+    assert_eq!(dag.height(), 3);
+}
\ No newline at end of file

From 94b8159be71d59298325a0f76dd99767a8e43269 Mon Sep 17 00:00:00 2001
From: Harsh Pratap Singh
Date: Sun, 12 Jan 2025 21:26:13 +0530
Subject: [PATCH 07/15] remove redundancy in tests

---
 src/primary/tests/header_tests.rs            |   6 +-
 src/synchronizer/tests/synchronizer_tests.rs | 159 -------------------
 src/types/tests/dag_tests.rs                 |  23 +--
 3 files changed, 11 insertions(+), 177 deletions(-)

diff --git a/src/primary/tests/header_tests.rs b/src/primary/tests/header_tests.rs
index d15e3e2..04d94c8 100644
--- a/src/primary/tests/header_tests.rs
+++ b/src/primary/tests/header_tests.rs
@@ -77,6 +77,7 @@ async fn test_wait_for_quorum() {
 
 #[tokio::test]
 async fn test_header_builder_sync_status() {
+    // Test initialization
     let (network_tx, _) = mpsc::channel(100);
     let (certificate_tx, _) =
mpsc::channel(100); let keypair = Keypair::generate(); @@ -87,6 +88,7 @@ async fn test_header_builder_sync_status() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Incomplete); + // Verify initialization by creating header builder let header_builder = HeaderBuilder { network_tx, certificate_tx, @@ -99,7 +101,7 @@ async fn test_header_builder_sync_status() { sync_status_rx, }; - // run header builder + // Run header builder in background let handle = tokio::spawn(async move { header_builder.run().await.unwrap(); }); @@ -107,7 +109,7 @@ async fn test_header_builder_sync_status() { // Test that header builder waits for sync to complete sleep(Duration::from_millis(100)).await; sync_status_tx.send(SyncStatus::Complete).unwrap(); - + handle.abort(); } diff --git a/src/synchronizer/tests/synchronizer_tests.rs b/src/synchronizer/tests/synchronizer_tests.rs index 1a5dd9c..b0d3376 100644 --- a/src/synchronizer/tests/synchronizer_tests.rs +++ b/src/synchronizer/tests/synchronizer_tests.rs @@ -41,85 +41,6 @@ impl DataProvider for MockDataProvider { } } -#[tokio::test] -async fn test_fetcher_basic() { - let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); - - // create mock data and provider - let mock_data = MockData { - id: "test_data".to_string(), - }; - let mock_provider = Box::new(MockDataProvider { - peers: vec!["peer1".to_string(), "peer2".to_string()], - }); - - let requested_object = RequestedObject { - object: mock_data, - source: mock_provider, - }; - - // create and start fetcher - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); - - // run fetcher - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); - - // verify request is sent - let request = requests_rx.recv().await.unwrap(); - match request { - NetworkRequest::Sync { request, peer_id } => { - assert!(peer_id == "peer1" || peer_id == "peer2"); - } - _ => panic!("Expected sync request"), - } - - handle.abort(); -} - -#[tokio::test] -async fn test_fetcher_response_handling() { - let (requests_tx, _) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); - - // create mock data and provider - let mock_data = MockData { - id: "test_data".to_string(), - }; - let mock_provider = Box::new(MockDataProvider { - peers: vec!["peer1".to_string()], - }); - - let requested_object = RequestedObject { - object: mock_data.clone(), - source: mock_provider, - }; - - // create and start fetcher - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); - - // run fetcher - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); - - // send mock response - let response = SyncResponse::Success(RequestPayload::Data(mock_data.bytes())); - let received_response = ReceivedObject { - object: response, - peer_id: "peer1".to_string(), - }; - responses_tx.send(received_response).unwrap(); - - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - handle.abort(); -} - #[tokio::test] async fn test_fetcher_multiple_objects() { let (requests_tx, mut requests_rx) = mpsc::channel(100); @@ -145,17 +66,14 @@ async fn test_fetcher_multiple_objects() { source: mock_provider, }; - // create and start fetcher let mut fetcher = Fetcher::new(); fetcher.push(Box::new(requested_object1)); fetcher.push(Box::new(requested_object2)); - // run fetcher let handle = 
tokio::spawn(async move { fetcher.run(requests_tx, responses_rx).await.unwrap(); }); - // verify multiple requests are sent let request1 = requests_rx.recv().await.unwrap(); let request2 = requests_rx.recv().await.unwrap(); @@ -170,7 +88,6 @@ async fn test_synchronizer_concurrent_requests() { let (requests_tx, mut requests_rx) = mpsc::channel(100); let (responses_tx, responses_rx) = broadcast::channel(100); - // create multiple mock objects with different providers let mock_data1 = MockData { id: "data1".to_string(), }; @@ -205,12 +122,10 @@ async fn test_synchronizer_concurrent_requests() { source: provider3, })); - // run fetcher let handle = tokio::spawn(async move { fetcher.run(requests_tx, responses_rx).await.unwrap(); }); - // collect all requests let mut received_requests = HashSet::new(); for _ in 0..3 { if let NetworkRequest::Sync { request: _, peer_id } = requests_rx.recv().await.unwrap() { @@ -247,16 +162,13 @@ async fn test_synchronizer_retry_on_failure() { let mut fetcher = Fetcher::new(); fetcher.push(Box::new(requested_object)); - // run fetcher let handle = tokio::spawn(async move { fetcher.run(requests_tx, responses_rx).await.unwrap(); }); - // get first request let request1 = requests_rx.recv().await.unwrap(); match request1 { NetworkRequest::Sync { request: _, peer_id } => { - // send error response let error_response = SyncResponse::Error("test error".to_string()); responses_tx .send(ReceivedObject { @@ -268,7 +180,6 @@ async fn test_synchronizer_retry_on_failure() { _ => panic!("Expected sync request"), } - // should get another request to different peer let request2 = requests_rx.recv().await.unwrap(); match request2 { NetworkRequest::Sync { request: _, peer_id } => { @@ -286,72 +197,6 @@ async fn test_synchronizer_retry_on_failure() { handle.abort(); } -#[tokio::test] -async fn test_synchronizer_partial_response() { - let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); - - let mock_data1 = MockData { - id: "data1".to_string(), - }; - let mock_data2 = MockData { - id: "data2".to_string(), - }; - - let provider = Box::new(MockDataProvider { - peers: vec!["peer1".to_string()], - }); - - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(RequestedObject { - object: mock_data1.clone(), - source: provider.clone(), - })); - fetcher.push(Box::new(RequestedObject { - object: mock_data2.clone(), - source: provider, - })); - - // run fetcher - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); - - // get first request - let request = requests_rx.recv().await.unwrap(); - match request { - NetworkRequest::Sync { request: _, peer_id } => { - // Send partial success response - let response = SyncResponse::Success(RequestPayload::Data(mock_data1.bytes())); - responses_tx - .send(ReceivedObject { - object: response, - peer_id, - }) - .unwrap(); - } - _ => panic!("Expected sync request"), - } - - // Should get another request for remaining data - let request = requests_rx.recv().await.unwrap(); - match request { - NetworkRequest::Sync { request: _, peer_id } => { - // Send success response for second object - let response = SyncResponse::Success(RequestPayload::Data(mock_data2.bytes())); - responses_tx - .send(ReceivedObject { - object: response, - peer_id, - }) - .unwrap(); - } - _ => panic!("Expected sync request"), - } - - handle.abort(); -} - #[tokio::test] async fn test_synchronizer_invalid_response_data() { let (requests_tx, mut requests_rx) = 
mpsc::channel(100); @@ -373,16 +218,13 @@ async fn test_synchronizer_invalid_response_data() { let mut fetcher = Fetcher::new(); fetcher.push(Box::new(requested_object)); - // run fetcher let handle = tokio::spawn(async move { fetcher.run(requests_tx, responses_rx).await.unwrap(); }); - // get request let request = requests_rx.recv().await.unwrap(); match request { NetworkRequest::Sync { request: _, peer_id } => { - // Send invalid response data let invalid_data = vec![0, 1, 2]; // Different from what was requested let response = SyncResponse::Success(RequestPayload::Data(invalid_data)); responses_tx @@ -395,7 +237,6 @@ async fn test_synchronizer_invalid_response_data() { _ => panic!("Expected sync request"), } - // should get another request due to invalid response let request = requests_rx.recv().await.unwrap(); assert!(matches!(request, NetworkRequest::Sync { .. })); diff --git a/src/types/tests/dag_tests.rs b/src/types/tests/dag_tests.rs index a7e1af9..526377b 100644 --- a/src/types/tests/dag_tests.rs +++ b/src/types/tests/dag_tests.rs @@ -35,10 +35,16 @@ async fn test_dag_creation_and_basic_ops() { let data = TestData { value: 1 }; let parents = HashSet::new(); let vertex = Vertex::from_data(data, 1, parents); + let vertex_id = vertex.id().clone(); - dag.insert(vertex.clone()).unwrap(); + dag.insert(vertex).unwrap(); assert_eq!(dag.height(), 1); assert_eq!(dag.layer_size(1), 1); + + // Test vertex retrieval + let retrieved = dag.get_vertex(&vertex_id).unwrap(); + assert_eq!(retrieved.data().value, 1); + assert_eq!(retrieved.layer(), 1); } #[tokio::test] @@ -124,21 +130,6 @@ async fn test_dag_multiple_parents() { assert_eq!(dag.layer_size(2), 1); } -#[tokio::test] -async fn test_dag_vertex_retrieval() { - let mut dag: Dag = Dag::new(0); - - let data = TestData { value: 1 }; - let vertex = Vertex::from_data(data, 1, HashSet::new()); - let vertex_id = vertex.id().clone(); - - dag.insert(vertex).unwrap(); - - let retrieved = dag.get_vertex(&vertex_id).unwrap(); - assert_eq!(retrieved.data().value, 1); - assert_eq!(retrieved.layer(), 1); -} - #[tokio::test] async fn test_dag_cyclic_insertion_prevention() { let mut dag: Dag = Dag::new(0); From 239c2585d178ced55ff4c7db43990a1178fd79d9 Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Mon, 13 Jan 2025 00:14:42 +0530 Subject: [PATCH 08/15] test fixes --- src/primary/mod.rs | 5 + src/primary/tests/header_tests.rs | 186 +++++---- src/synchronizer/mod.rs | 6 + src/synchronizer/tests/fetcher_tests.rs | 348 ++++++++++++----- src/synchronizer/tests/synchronizer_tests.rs | 388 ++++++++++++------- src/types/dag.rs | 42 +- src/types/mod.rs | 5 + src/types/tests/dag_tests.rs | 13 +- src/utils/mod.rs | 4 + 9 files changed, 680 insertions(+), 317 deletions(-) diff --git a/src/primary/mod.rs b/src/primary/mod.rs index b2f18fd..4d69870 100644 --- a/src/primary/mod.rs +++ b/src/primary/mod.rs @@ -259,3 +259,8 @@ impl BaseAgent for Primary { } } } + +#[cfg(test)] +mod tests { + mod header_tests; +} diff --git a/src/primary/tests/header_tests.rs b/src/primary/tests/header_tests.rs index 04d94c8..86f7121 100644 --- a/src/primary/tests/header_tests.rs +++ b/src/primary/tests/header_tests.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, sync::Arc, time::Duration}; -use libp2p::identity::ed25519::Keypair; +use libp2p::{identity::ed25519::Keypair, PeerId}; use tokio::{ sync::{broadcast, mpsc, watch}, time::sleep, @@ -17,12 +17,77 @@ use crate::{ certificate::Certificate, network::{NetworkRequest, ReceivedObject}, sync::SyncStatus, + traits::AsBytes, + 
signing::Signable, vote::Vote, Round, }, utils::CircularBuffer, }; +// First, let's create test helper functions +impl BlockHeader { + #[cfg(test)] + pub fn new_test() -> Self { + let peer_id = PeerId::random(); + let mut author = [0u8; 32]; + let peer_bytes = peer_id.to_bytes(); + let hash = blake3::hash(&peer_bytes); + author.copy_from_slice(hash.as_bytes()); + + Self { + round: 1, + author, + timestamp_ms: chrono::Utc::now().timestamp_millis() as u128, + certificates_ids: vec![], + digests: vec![], + } + } +} + +impl Vote { + #[cfg(test)] + pub fn new_test() -> Self { + let keypair = Keypair::generate(); + let authority = keypair.public().to_bytes(); + let header = BlockHeader::new_test(); + let signature = header.sign(&keypair).unwrap(); + + Self { + authority, + signature, + } + } + + #[cfg(test)] + pub fn new_test_invalid() -> Self { + let keypair = Keypair::generate(); + let authority = keypair.public().to_bytes(); + + Self { + authority, + signature: vec![0; 32], + } + } +} + +impl Db { + #[cfg(test)] + pub async fn new_in_memory() -> anyhow::Result { + let temp_dir = tempfile::tempdir()?; + Self::new(temp_dir.path().to_path_buf()) + } +} + +impl BatchId { + #[cfg(test)] + pub fn test_digest(value: u8) -> Self { + let mut digest = [0u8; 32]; + digest.fill(value); + Self(digest) + } +} + #[tokio::test] async fn test_header_builder_initialization() { let (network_tx, _) = mpsc::channel(100); @@ -35,17 +100,20 @@ async fn test_header_builder_initialization() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); - let _header_builder = HeaderBuilder { + let handle = HeaderBuilder::spawn( + CancellationToken::new(), network_tx, certificate_tx, keypair, - _db: db, + db, header_trigger_rx, votes_rx, digests_buffer, committee, sync_status_rx, - }; + ); + + handle.abort(); } #[tokio::test] @@ -55,13 +123,17 @@ async fn test_wait_for_quorum() { let header = BlockHeader::new_test(); let threshold = 2; + // Create votes before spawning the task + let votes: Vec<_> = (0..threshold) + .map(|_| Vote::from_header(header.clone(), &keypair).unwrap()) + .collect(); + // send votes tokio::spawn(async move { - for i in 0..threshold { - let vote = Vote::new_test(); + for vote in votes { let received = ReceivedObject { object: vote, - peer_id: format!("peer_{}", i), + sender: PeerId::random(), }; votes_tx.send(received).unwrap(); sleep(Duration::from_millis(100)).await; @@ -77,7 +149,6 @@ async fn test_wait_for_quorum() { #[tokio::test] async fn test_header_builder_sync_status() { - // Test initialization let (network_tx, _) = mpsc::channel(100); let (certificate_tx, _) = mpsc::channel(100); let keypair = Keypair::generate(); @@ -88,23 +159,19 @@ async fn test_header_builder_sync_status() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Incomplete); - // Verify initialization by creating header builder - let header_builder = HeaderBuilder { + // Spawn header builder - it returns a JoinHandle + let handle = HeaderBuilder::spawn( + CancellationToken::new(), network_tx, certificate_tx, keypair, - _db: db, + db, header_trigger_rx, votes_rx, digests_buffer, committee, sync_status_rx, - }; - - // Run header builder in background - let handle = tokio::spawn(async move { - header_builder.run().await.unwrap(); - }); + ); // Test that header builder waits for sync to complete sleep(Duration::from_millis(100)).await; @@ -125,22 +192,18 @@ async fn test_header_builder_with_empty_digests() { let 
committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); - let header_builder = HeaderBuilder { + let handle = HeaderBuilder::spawn( + CancellationToken::new(), network_tx, certificate_tx, keypair, - _db: db, + db, header_trigger_rx, votes_rx, digests_buffer, committee, sync_status_rx, - }; - - // run header builder - let handle = tokio::spawn(async move { - header_builder.run().await.unwrap(); - }); + ); // trigger header building with empty digests header_trigger_tx.send((1, HashSet::new())).unwrap(); @@ -161,22 +224,18 @@ async fn test_header_builder_multiple_rounds() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); - let header_builder = HeaderBuilder { + let handle = HeaderBuilder::spawn( + CancellationToken::new(), network_tx, certificate_tx, keypair, - _db: db, + db, header_trigger_rx, votes_rx, digests_buffer, committee, sync_status_rx, - }; - - // run header builder - let handle = tokio::spawn(async move { - header_builder.run().await.unwrap(); - }); + ); // trigger multiple rounds for round in 1..=3 { @@ -201,31 +260,25 @@ async fn test_header_builder_quorum_timeout() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); - let header_builder = HeaderBuilder { + let handle = HeaderBuilder::spawn( + CancellationToken::new(), network_tx, certificate_tx, keypair, - _db: db, + db, header_trigger_rx, votes_rx, digests_buffer, committee, sync_status_rx, - }; - - // run header builder - let handle = tokio::spawn(async move { - header_builder.run().await.unwrap(); - }); + ); // trigger header building but don't send any votes let mut certs = HashSet::new(); certs.insert(Certificate::genesis([0; 32])); header_trigger_tx.send((1, certs)).unwrap(); - // wait for timeout duration sleep(Duration::from_millis(110)).await; - handle.abort(); } @@ -241,26 +294,25 @@ async fn test_header_builder_invalid_votes() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); - let header_builder = HeaderBuilder { + let handle = HeaderBuilder::spawn( + CancellationToken::new(), network_tx, certificate_tx, keypair, - _db: db, + db, header_trigger_rx, votes_rx, digests_buffer, committee, sync_status_rx, - }; - - // run header builder - let handle = tokio::spawn(async move { - header_builder.run().await.unwrap(); - }); + ); // send invalid votes let invalid_vote = Vote::new_test_invalid(); - votes_tx.send(ReceivedObject::new(invalid_vote, "peer1".to_string())).unwrap(); + votes_tx.send(ReceivedObject { + object: invalid_vote, + sender: PeerId::random(), + }).unwrap(); sleep(Duration::from_millis(10)).await; handle.abort(); @@ -278,38 +330,34 @@ async fn test_header_builder_digest_buffer() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Complete); - let header_builder = HeaderBuilder { + let handle = HeaderBuilder::spawn( + CancellationToken::new(), network_tx, certificate_tx, keypair, - _db: db, + db, header_trigger_rx, votes_rx, - digests_buffer: digests_buffer.clone(), + digests_buffer.clone(), committee, sync_status_rx, - }; - - // Run header builder in background - let handle = tokio::spawn(async move { - header_builder.run().await.unwrap(); - }); + ); // Add digests to buffer { let mut buffer = digests_buffer.lock().await; - buffer.push(BatchId::new([1; 32])); - buffer.push(BatchId::new([2; 32])); - // 
This should overwrite the first digest - buffer.push(BatchId::new([3; 32])); + buffer.push(BatchId::test_digest(1)); + buffer.push(BatchId::test_digest(2)); + buffer.push(BatchId::test_digest(3)); } // Verify buffer state { - let buffer = digests_buffer.lock().await; - assert_eq!(buffer.len(), 2); - assert_eq!(buffer.as_slice()[0], BatchId::new([2; 32])); - assert_eq!(buffer.as_slice()[1], BatchId::new([3; 32])); + let mut buffer = digests_buffer.lock().await; + let contents = buffer.drain(); + assert_eq!(contents.len(), 2); + assert_eq!(contents[0], BatchId::test_digest(2)); + assert_eq!(contents[1], BatchId::test_digest(3)); } handle.abort(); diff --git a/src/synchronizer/mod.rs b/src/synchronizer/mod.rs index 2fdbb73..bd9ed4f 100644 --- a/src/synchronizer/mod.rs +++ b/src/synchronizer/mod.rs @@ -210,3 +210,9 @@ pub enum FetchError { #[error("id error")] IdError, } + +#[cfg(test)] +mod tests { + mod synchronizer_tests; + mod fetcher_tests; +} diff --git a/src/synchronizer/tests/fetcher_tests.rs b/src/synchronizer/tests/fetcher_tests.rs index faca3c2..2c91af4 100644 --- a/src/synchronizer/tests/fetcher_tests.rs +++ b/src/synchronizer/tests/fetcher_tests.rs @@ -1,116 +1,245 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use tokio::sync::{broadcast, mpsc}; +use tokio_util::sync::CancellationToken; +use rand::random; use crate::{ + network::Connect, synchronizer::{ fetcher::Fetcher, - traits::{DataProvider, Fetch}, - FetchError, RequestedObject, + traits::{DataProvider, Fetch, IntoSyncRequest}, + RequestedObject, }, types::{ - network::{NetworkRequest, ReceivedObject, RequestPayload, SyncRequest, SyncResponse}, - traits::{AsBytes, Hash}, + network::{NetworkRequest, ReceivedObject, RequestPayload, SyncRequest, SyncResponse, SyncData}, + traits::{AsBytes, Hash, Random}, + Digest, + batch::{Batch, BatchId}, + transaction::Transaction, }, }; -#[derive(Clone, Debug)] -struct TestFetchable { - id: String, - data: Vec, +use libp2p::PeerId; +use async_trait::async_trait; + +#[derive(Clone)] +struct MockConnector; + +#[async_trait] +impl Connect for MockConnector { + async fn dispatch(&self, request: &RequestPayload, peer_id: PeerId) -> anyhow::Result<()> { + Ok(()) + } } -impl Hash for TestFetchable { - fn hash(&self) -> String { - self.id.clone() +#[async_trait] +impl Connect for Arc { + async fn dispatch(&self, request: &RequestPayload, peer_id: PeerId) -> anyhow::Result<()> { + self.as_ref().dispatch(request, peer_id).await + } +} + +// Create a mock connector that sleeps to simulate network delay +#[derive(Clone)] +struct SlowMockConnector; + +#[async_trait] +impl Connect for SlowMockConnector { + async fn dispatch(&self, _request: &RequestPayload, _peer_id: PeerId) -> anyhow::Result<()> { + tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; + Ok(()) } } +#[async_trait] +impl Connect for Arc { + async fn dispatch(&self, request: &RequestPayload, peer_id: PeerId) -> anyhow::Result<()> { + self.as_ref().dispatch(request, peer_id).await + } +} + +#[derive(Debug, Clone)] +struct TestFetchable { + data: Vec, +} + impl AsBytes for TestFetchable { fn bytes(&self) -> Vec { self.data.clone() } } +impl Random for TestFetchable { + fn random(size: usize) -> Self { + let data = (0..size).map(|_| rand::random()).collect(); + Self { data } + } +} + +impl IntoSyncRequest for TestFetchable { + fn into_sync_request(&self) -> SyncRequest { + let digest = blake3::hash(&self.bytes()).into(); + SyncRequest::Batches(vec![digest]) + } +} + +#[derive(Clone)] struct 
TestDataProvider { - peers: Vec, + peers: Vec, } +#[async_trait] impl DataProvider for TestDataProvider { - fn peers(&self) -> Vec { - self.peers.clone() + async fn sources(&self) -> Box + Send> { + Box::new(self.peers.clone().into_iter()) } } +#[tokio::test] +async fn test_fetcher_basic() { + let (network_tx, mut network_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); + let connector = Arc::new(MockConnector); + let token = CancellationToken::new(); + + let _handle = Fetcher::spawn( + token.clone(), + network_tx, + commands_rx, + sync_rx, + connector, + 10, + ); + + let test_data = TestFetchable { + data: vec![1, 2, 3], + }; + + let peer_id = PeerId::random(); + let request = Box::new(RequestedObject { + object: test_data.clone(), + source: Box::new(peer_id), + }); + commands_tx.send(request).await.unwrap(); + + // Verify request is sent + let request = network_rx.recv().await.unwrap(); + match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { + assert_eq!(pid, peer_id); + let expected_digest = blake3::hash(&test_data.bytes()).into(); + assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); + } + _ => panic!("Expected SendTo request with SyncRequest payload"), + } + + // Send successful response + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![batch]); + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Success(request_id, sync_data); + let received = ReceivedObject { + object: response, + sender: peer_id, + }; + sync_tx.send(received).unwrap(); + + // Drop the sender to signal no more commands + drop(commands_tx); +} + #[tokio::test] async fn test_fetcher_empty() { - let (requests_tx, _) = mpsc::channel(100); - let (_, responses_rx) = broadcast::channel(100); + let (network_tx, _) = mpsc::channel(100); + let (_, sync_rx) = broadcast::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); - let mut fetcher = Fetcher::new(); - let result = fetcher.run(requests_tx, responses_rx).await; - assert!(result.is_ok()); + let _handle = Fetcher::spawn( + CancellationToken::new(), + network_tx, + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); + + // Drop commands_tx to signal no more commands + drop(commands_tx); } #[tokio::test] async fn test_fetcher_single_request() { - let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); + let (network_tx, mut network_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); let test_data = TestFetchable { - id: "test1".to_string(), data: vec![1, 2, 3], }; - let provider = Box::new(TestDataProvider { - peers: vec!["peer1".to_string()], + let peer_id = PeerId::random(); + let request = Box::new(RequestedObject { + object: test_data.clone(), + source: Box::new(peer_id), }); - let requested_object = RequestedObject { - object: test_data.clone(), - source: provider, - }; + let _handle = Fetcher::spawn( + CancellationToken::new(), + network_tx.clone(), + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); + // Send the request through the commands channel + commands_tx.send(request).await.unwrap(); - // run fetcher - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, 
responses_rx).await.unwrap(); - }); + // Drop commands_tx to signal no more commands + drop(commands_tx); // verify request is sent - let request = requests_rx.recv().await.unwrap(); + let request = network_rx.recv().await.unwrap(); match request { - NetworkRequest::Sync { request, peer_id } => { - assert_eq!(peer_id, "peer1"); + NetworkRequest::SendTo(peer_id, _) => { + // Get first peer from provider + let expected_peer = peer_id; + assert_eq!(peer_id, expected_peer); } - _ => panic!("Expected sync request"), + _ => panic!("Expected SendTo request"), } - // send response - let response = SyncResponse::Success(RequestPayload::Data(test_data.bytes())); + // Send successful response + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![batch]); + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Success(request_id, sync_data); let received = ReceivedObject { object: response, - peer_id: "peer1".to_string(), + sender: peer_id, }; - responses_tx.send(received).unwrap(); + sync_tx.send(received).unwrap(); tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - handle.abort(); } #[tokio::test] async fn test_fetcher_timeout() { let (requests_tx, _) = mpsc::channel(100); - let (_, responses_rx) = broadcast::channel(100); + let (responses_tx, mut responses_rx) = broadcast::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); let test_data = TestFetchable { - id: "test1".to_string(), data: vec![1, 2, 3], }; let provider = Box::new(TestDataProvider { - peers: vec!["peer1".to_string()], + peers: vec![ + PeerId::random() + ], }); let requested_object = RequestedObject { @@ -118,99 +247,144 @@ async fn test_fetcher_timeout() { source: provider, }; - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); + let handle = Fetcher::spawn( + CancellationToken::new(), + requests_tx, + commands_rx, + responses_rx, + Arc::new(SlowMockConnector), + // Set a very short timeout for individual fetch operations + 1, + ); + + // Send the request through the commands channel + commands_tx.send(Box::new(requested_object)).await.unwrap(); - // Run fetcher with very short timeout - tokio::time::timeout( - tokio::time::Duration::from_millis(50), - fetcher.run(requests_tx, responses_rx) - ).await.unwrap_err(); + // Drop commands_tx to signal no more commands + drop(commands_tx); + + // Run fetcher with a longer timeout to ensure it has time to process + let result = tokio::time::timeout( + tokio::time::Duration::from_millis(1000), + handle + ).await; + + // The fetcher should complete successfully, but the fetch operation should have timed out + assert!(result.is_ok(), "Fetcher should complete"); } #[tokio::test] async fn test_fetcher_error_response() { let (requests_tx, _) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); + let (responses_tx, mut responses_rx) = broadcast::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); let test_data = TestFetchable { - id: "test1".to_string(), data: vec![1, 2, 3], }; let provider = Box::new(TestDataProvider { - peers: vec!["peer1".to_string()], + peers: vec![ + PeerId::random() + ], }); let requested_object = RequestedObject { - object: test_data, + object: test_data.clone(), source: provider, }; - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); + let _handle = Fetcher::spawn( + CancellationToken::new(), + requests_tx, + commands_rx, + responses_rx, + 
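+        // Note: MockConnector's dispatch() returns Ok(()) immediately without
+        // doing any I/O, so this test exercises only the fetcher's own
+        // bookkeeping, not real network behaviour.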
Arc::new(MockConnector), + 10, + ); - // run fetcher - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); + // Send the request through the commands channel + commands_tx.send(Box::new(requested_object)).await.unwrap(); + + // Drop commands_tx to signal no more commands + drop(commands_tx); - // send error response - let response = SyncResponse::Error("test error".to_string()); + // Send failure response + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Failure(request_id); let received = ReceivedObject { object: response, - peer_id: "peer1".to_string(), + sender: PeerId::random(), }; responses_tx.send(received).unwrap(); tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - handle.abort(); } #[tokio::test] async fn test_fetcher_multiple_peers() { let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); + let (responses_tx, mut responses_rx) = broadcast::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); let test_data = TestFetchable { - id: "test1".to_string(), data: vec![1, 2, 3], }; let provider = Box::new(TestDataProvider { - peers: vec!["peer1".to_string(), "peer2".to_string(), "peer3".to_string()], + peers: vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + ], }); let requested_object = RequestedObject { object: test_data.clone(), - source: provider, + source: provider.clone(), }; - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); + let _handle = Fetcher::spawn( + CancellationToken::new(), + requests_tx, + commands_rx, + responses_rx, + Arc::new(MockConnector), + 10, + ); - // run fetcher - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); + // Send the request through the commands channel + commands_tx.send(Box::new(requested_object)).await.unwrap(); + + // Drop commands_tx to signal no more commands + drop(commands_tx); // verify first request is sent let request = requests_rx.recv().await.unwrap(); - match request { - NetworkRequest::Sync { request, peer_id } => { - assert!(["peer1", "peer2", "peer3"].contains(&peer_id.as_str())); + let peer_id = match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { + // Get first peer from provider + let mut peer_ids = provider.sources().await; + let expected_peer = peer_ids.next().unwrap(); + assert_eq!(pid, expected_peer); + let expected_digest = blake3::hash(&test_data.bytes()).into(); + assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); + pid } - _ => panic!("Expected sync request"), - } + _ => panic!("Expected SendTo request with SyncRequest payload"), + }; - // send successful response from one peer - let response = SyncResponse::Success(RequestPayload::Data(test_data.bytes())); + // Send partial response + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![batch]); + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Success(request_id, sync_data); let received = ReceivedObject { object: response, - peer_id: "peer2".to_string(), + sender: peer_id, }; responses_tx.send(received).unwrap(); tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - handle.abort(); } \ No newline at end of file diff --git a/src/synchronizer/tests/synchronizer_tests.rs b/src/synchronizer/tests/synchronizer_tests.rs index 
b0d3376..4a855d4 100644 --- a/src/synchronizer/tests/synchronizer_tests.rs +++ b/src/synchronizer/tests/synchronizer_tests.rs @@ -1,60 +1,211 @@ -use std::{collections::HashSet, sync::Arc}; - -use tokio::sync::{broadcast, mpsc}; - use crate::{ + network::Connect, synchronizer::{ fetcher::Fetcher, - traits::{DataProvider, Fetch}, + traits::{DataProvider, IntoSyncRequest}, RequestedObject, }, types::{ - network::{NetworkRequest, ReceivedObject, RequestPayload, SyncRequest, SyncResponse}, - traits::{AsBytes, Hash}, + batch::{Batch, BatchId}, + network::{NetworkRequest, ReceivedObject, RequestPayload, SyncData, SyncRequest, SyncResponse}, + traits::{AsBytes, Hash, Random}, + transaction::Transaction, + Digest, }, }; +use async_trait::async_trait; +use libp2p::PeerId; +use std::{collections::HashSet, sync::Arc}; +use tokio::sync::{broadcast, mpsc}; +use tokio_util::sync::CancellationToken; -#[derive(Clone, Debug)] -struct MockData { - id: String, +struct MockConnector; + +#[async_trait] +impl Connect for MockConnector { + async fn dispatch(&self, _request: &RequestPayload, _peer_id: PeerId) -> anyhow::Result<()> { + Ok(()) + } } -impl Hash for MockData { - fn hash(&self) -> String { - self.id.clone() +#[async_trait] +impl Connect for Arc { + async fn dispatch(&self, request: &RequestPayload, peer_id: PeerId) -> anyhow::Result<()> { + self.as_ref().dispatch(request, peer_id).await } } +#[derive(Clone)] +struct MockData { + data: Vec, +} + impl AsBytes for MockData { fn bytes(&self) -> Vec { - self.id.as_bytes().to_vec() + self.data.clone() } } +impl IntoSyncRequest for MockData { + fn into_sync_request(&self) -> SyncRequest { + let digest = blake3::hash(&self.bytes()).into(); + SyncRequest::Batches(vec![digest]) + } +} + +#[derive(Clone)] struct MockDataProvider { - peers: Vec, + peers: Vec, } +#[async_trait] impl DataProvider for MockDataProvider { - fn peers(&self) -> Vec { - self.peers.clone() + async fn sources(&self) -> Box + Send> { + Box::new(self.peers.clone().into_iter()) } } +#[tokio::test] +async fn test_synchronizer_invalid_response_data() { + let (network_tx, mut network_rx) = mpsc::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); + + let mock_data = MockData { + data: vec![1, 2, 3], + }; + + let peer_id = PeerId::random(); + let provider = Box::new(MockDataProvider { + peers: vec![peer_id], + }); + + let requested_object = RequestedObject { + object: mock_data.clone(), + source: provider, + }; + + let _handle = Fetcher::spawn( + CancellationToken::new(), + network_tx, + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); + + // Send the request through the commands channel + let request = Box::new(requested_object); + commands_tx.send(request).await.unwrap(); + + // Drop commands_tx to signal no more commands + drop(commands_tx); + + // verify request is sent + let request = network_rx.recv().await.unwrap(); + match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { + assert_eq!(pid, peer_id); + let expected_digest = blake3::hash(&mock_data.bytes()).into(); + assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); + + // Send successful response + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![batch]); + let request_id = mock_data.into_sync_request().digest(); + let response = SyncResponse::Success(request_id, sync_data); + let received = ReceivedObject { + object: response, + sender: pid, + }; 
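+            // The response is keyed by the digest of the original sync request
+            // (request_id above), which is how the fetcher matches a broadcast
+            // response back to the request it has pending.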
+ sync_tx.send(received).unwrap(); + } + _ => panic!("Expected SendTo request with SyncRequest payload"), + }; + + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; +} + +#[tokio::test] +async fn test_synchronizer_multiple_peers() { + let (network_tx, mut network_rx) = mpsc::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); + + let mock_data = MockData { + data: vec![1, 2, 3], + }; + + let provider = Box::new(MockDataProvider { + peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], + }); + + let requested_object = RequestedObject { + object: mock_data.clone(), + source: provider.clone(), + }; + + let _handle = Fetcher::spawn( + CancellationToken::new(), + network_tx, + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); + + // Send the request through the commands channel + let request = Box::new(requested_object); + commands_tx.send(request).await.unwrap(); + + // Drop commands_tx to signal no more commands + drop(commands_tx); + + // verify first request is sent + let request = network_rx.recv().await.unwrap(); + let matched_peer_id = match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { + let mut peer_ids = provider.sources().await; + let expected_peer = peer_ids.next().unwrap(); + assert_eq!(pid, expected_peer); + let expected_digest = blake3::hash(&mock_data.bytes()).into(); + assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); + pid + } + _ => panic!("Expected SendTo request with SyncRequest payload"), + }; + + // Send successful response + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![batch]); + let request_id = mock_data.into_sync_request().digest(); + let response = SyncResponse::Success(request_id, sync_data); + let received = ReceivedObject { + object: response, + sender: matched_peer_id, + }; + sync_tx.send(received).unwrap(); + + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; +} + #[tokio::test] async fn test_fetcher_multiple_objects() { - let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); + let (network_tx, mut network_rx) = mpsc::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); let mock_data1 = MockData { - id: "test_data1".to_string(), + data: vec![1, 2, 3], }; let mock_data2 = MockData { - id: "test_data2".to_string(), + data: vec![4, 5, 6], }; let mock_provider = Box::new(MockDataProvider { - peers: vec!["peer1".to_string()], + peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], }); let requested_object1 = RequestedObject { @@ -66,92 +217,104 @@ async fn test_fetcher_multiple_objects() { source: mock_provider, }; - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object1)); - fetcher.push(Box::new(requested_object2)); - - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); + let _handle = Fetcher::spawn( + CancellationToken::new(), + network_tx, + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); + + // Send requests through the commands channel + commands_tx.send(Box::new(requested_object1)).await.unwrap(); + commands_tx.send(Box::new(requested_object2)).await.unwrap(); - let request1 = requests_rx.recv().await.unwrap(); - let request2 = requests_rx.recv().await.unwrap(); + // Drop commands_tx to 
signal no more commands + drop(commands_tx); - assert!(matches!(request1, NetworkRequest::Sync { .. })); - assert!(matches!(request2, NetworkRequest::Sync { .. })); + // Verify requests are sent + let request1 = network_rx.recv().await.unwrap(); + let request2 = network_rx.recv().await.unwrap(); - handle.abort(); + assert!(matches!(request1, NetworkRequest::SendTo(_, _))); + assert!(matches!(request2, NetworkRequest::SendTo(_, _))); } #[tokio::test] async fn test_synchronizer_concurrent_requests() { - let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); + let (network_tx, mut network_rx) = mpsc::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); let mock_data1 = MockData { - id: "data1".to_string(), + data: vec![1, 2, 3], }; let mock_data2 = MockData { - id: "data2".to_string(), + data: vec![4, 5, 6], }; let mock_data3 = MockData { - id: "data3".to_string(), + data: vec![7, 8, 9], }; let provider1 = Box::new(MockDataProvider { - peers: vec!["peer1".to_string()], + peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], }); let provider2 = Box::new(MockDataProvider { - peers: vec!["peer2".to_string()], + peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], }); let provider3 = Box::new(MockDataProvider { - peers: vec!["peer3".to_string()], + peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], }); - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(RequestedObject { + let _handle = Fetcher::spawn( + CancellationToken::new(), + network_tx, + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); + + // Send requests through the commands channel + commands_tx.send(Box::new(RequestedObject { object: mock_data1.clone(), source: provider1, - })); - fetcher.push(Box::new(RequestedObject { + })).await.unwrap(); + commands_tx.send(Box::new(RequestedObject { object: mock_data2.clone(), source: provider2, - })); - fetcher.push(Box::new(RequestedObject { + })).await.unwrap(); + commands_tx.send(Box::new(RequestedObject { object: mock_data3.clone(), source: provider3, - })); + })).await.unwrap(); - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); + // Drop commands_tx to signal no more commands + drop(commands_tx); let mut received_requests = HashSet::new(); for _ in 0..3 { - if let NetworkRequest::Sync { request: _, peer_id } = requests_rx.recv().await.unwrap() { + if let NetworkRequest::SendTo(peer_id, _) = network_rx.recv().await.unwrap() { received_requests.insert(peer_id); } } assert_eq!(received_requests.len(), 3); - assert!(received_requests.contains("peer1")); - assert!(received_requests.contains("peer2")); - assert!(received_requests.contains("peer3")); - - handle.abort(); } #[tokio::test] async fn test_synchronizer_retry_on_failure() { - let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); + let (network_tx, mut network_rx) = mpsc::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); let mock_data = MockData { - id: "test_data".to_string(), + data: vec![1, 2, 3], }; let provider = Box::new(MockDataProvider { - peers: vec!["peer1".to_string(), "peer2".to_string()], + peers: vec![PeerId::random(), PeerId::random()], }); let requested_object = RequestedObject { @@ -159,86 +322,41 @@ async fn 
test_synchronizer_retry_on_failure() { source: provider, }; - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); + let _handle = Fetcher::spawn( + CancellationToken::new(), + network_tx, + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); + + // Send request through the commands channel + commands_tx.send(Box::new(requested_object)).await.unwrap(); - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); + // Drop commands_tx to signal no more commands + drop(commands_tx); - let request1 = requests_rx.recv().await.unwrap(); + let request1 = network_rx.recv().await.unwrap(); match request1 { - NetworkRequest::Sync { request: _, peer_id } => { - let error_response = SyncResponse::Error("test error".to_string()); - responses_tx - .send(ReceivedObject { - object: error_response, - peer_id: peer_id.clone(), - }) - .unwrap(); + NetworkRequest::SendTo(peer_id, _) => { + let response = SyncResponse::Failure(mock_data.into_sync_request().digest()); + sync_tx.send(ReceivedObject { + object: response, + sender: peer_id, + }).unwrap(); } - _ => panic!("Expected sync request"), + _ => panic!("Expected SendTo request"), } - let request2 = requests_rx.recv().await.unwrap(); + let request2 = network_rx.recv().await.unwrap(); match request2 { - NetworkRequest::Sync { request: _, peer_id } => { - assert_ne!( - peer_id, - match request1 { - NetworkRequest::Sync { peer_id, .. } => peer_id, - _ => panic!("Expected sync request"), - } - ); - } - _ => panic!("Expected sync request"), - } - - handle.abort(); -} - -#[tokio::test] -async fn test_synchronizer_invalid_response_data() { - let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, responses_rx) = broadcast::channel(100); - - let mock_data = MockData { - id: "test_data".to_string(), - }; - - let provider = Box::new(MockDataProvider { - peers: vec!["peer1".to_string(), "peer2".to_string()], - }); - - let requested_object = RequestedObject { - object: mock_data.clone(), - source: provider, - }; - - let mut fetcher = Fetcher::new(); - fetcher.push(Box::new(requested_object)); - - let handle = tokio::spawn(async move { - fetcher.run(requests_tx, responses_rx).await.unwrap(); - }); - - let request = requests_rx.recv().await.unwrap(); - match request { - NetworkRequest::Sync { request: _, peer_id } => { - let invalid_data = vec![0, 1, 2]; // Different from what was requested - let response = SyncResponse::Success(RequestPayload::Data(invalid_data)); - responses_tx - .send(ReceivedObject { - object: response, - peer_id, - }) - .unwrap(); + NetworkRequest::SendTo(peer_id1, _) => { + match request1 { + NetworkRequest::SendTo(peer_id2, _) => assert_ne!(peer_id1, peer_id2), + _ => panic!("Expected SendTo request"), + } } - _ => panic!("Expected sync request"), + _ => panic!("Expected SendTo request"), } - - let request = requests_rx.recv().await.unwrap(); - assert!(matches!(request, NetworkRequest::Sync { .. })); - - handle.abort(); } \ No newline at end of file diff --git a/src/types/dag.rs b/src/types/dag.rs index 659c32c..9008288 100644 --- a/src/types/dag.rs +++ b/src/types/dag.rs @@ -57,25 +57,29 @@ where } /// Check if a vertex has all its parents in the DAG, returning an error containing the missing parents if not. 
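+    /// Parents may live in any layer from the base layer up to the layer just
+    /// below the vertex; any that are absent are reported through
+    /// `DagError::MissingParents`.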
pub fn check_parents(&self, vertex: &Vertex) -> Result<(), DagError> { - if vertex.layer == 0 { + // Base layer (0) and layer 1 vertices don't require parents + if vertex.layer <= self.base_layer || vertex.layer == 1 { + if vertex.parents.is_empty() { + return Ok(()); + } + } + + // For other layers, check if all parents exist in any previous layer + let found_parents: HashSet<_> = (self.base_layer..vertex.layer) + .rev() + .flat_map(|layer| self.vertices_by_layers.get(&layer).into_iter().flatten()) + .collect(); + + if vertex.parents.iter().all(|p| found_parents.contains(p)) { Ok(()) } else { - self.vertices_by_layers - .get(&(vertex.layer - 1)) - .map(|potential_parents| { - if vertex.parents.is_subset(potential_parents) { - Ok(()) - } else { - Err(DagError::MissingParents( - vertex - .parents - .difference(potential_parents) - .cloned() - .collect(), - )) - } - }) - .unwrap_or(Err(DagError::MissingParents(vertex.parents.clone()))) + let missing: HashSet<_> = vertex + .parents + .iter() + .filter(|p| !found_parents.contains(*p)) + .cloned() + .collect(); + Err(DagError::MissingParents(missing)) } } /// Insert a vertex in the DAG, returning an error if its parents are missing but inserting it anyway. @@ -116,6 +120,10 @@ where .map(|vertex| vertex.data.clone()) .collect() } + /// Get a vertex by its ID + pub fn get(&self, id: &str) -> Option<&Vertex> { + self.vertices.get(id) + } } impl Vertex diff --git a/src/types/mod.rs b/src/types/mod.rs index 5d0c17f..bbe43d8 100644 --- a/src/types/mod.rs +++ b/src/types/mod.rs @@ -43,3 +43,8 @@ impl AsBytes for Acknowledgment { self.0.to_vec() } } + +#[cfg(test)] +mod tests { + mod dag_tests; +} diff --git a/src/types/tests/dag_tests.rs b/src/types/tests/dag_tests.rs index 526377b..3356fab 100644 --- a/src/types/tests/dag_tests.rs +++ b/src/types/tests/dag_tests.rs @@ -3,6 +3,7 @@ use std::collections::HashSet; use crate::types::{ dag::{Dag, DagError, Vertex}, traits::{AsBytes, Hash}, + Digest, }; #[derive(Clone, Debug)] @@ -10,12 +11,6 @@ struct TestData { value: u64, } -impl Hash for TestData { - fn hash(&self) -> String { - format!("test_data_{}", self.value) - } -} - impl AsBytes for TestData { fn bytes(&self) -> Vec { self.value.to_be_bytes().to_vec() @@ -24,7 +19,7 @@ impl AsBytes for TestData { #[tokio::test] async fn test_dag_creation_and_basic_ops() { - let base_layer = 0; + let base_layer: u64 = 0; let mut dag: Dag = Dag::new(base_layer); // Test initial state @@ -42,9 +37,9 @@ async fn test_dag_creation_and_basic_ops() { assert_eq!(dag.layer_size(1), 1); // Test vertex retrieval - let retrieved = dag.get_vertex(&vertex_id).unwrap(); + let retrieved = dag.get(&vertex_id).unwrap(); assert_eq!(retrieved.data().value, 1); - assert_eq!(retrieved.layer(), 1); + assert_eq!(*retrieved.layer(), 1); } #[tokio::test] diff --git a/src/utils/mod.rs b/src/utils/mod.rs index bd8294b..7279f0b 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -70,4 +70,8 @@ impl CircularBuffer { self.buffer = vec![None; self.size]; res } + + pub fn iter(&self) -> impl Iterator { + self.buffer.iter().filter_map(|x| x.as_ref()) + } } From 210a29bd75c522e5fd18ef7e1b900aec1d46cf9a Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Mon, 13 Jan 2025 00:39:52 +0530 Subject: [PATCH 09/15] test fixes --- src/synchronizer/tests/fetcher_tests.rs | 83 ++++++++++++++------ src/synchronizer/tests/synchronizer_tests.rs | 80 +++++++++++++++---- 2 files changed, 123 insertions(+), 40 deletions(-) diff --git a/src/synchronizer/tests/fetcher_tests.rs 
b/src/synchronizer/tests/fetcher_tests.rs index 2c91af4..2343988 100644 --- a/src/synchronizer/tests/fetcher_tests.rs +++ b/src/synchronizer/tests/fetcher_tests.rs @@ -324,19 +324,18 @@ async fn test_fetcher_error_response() { #[tokio::test] async fn test_fetcher_multiple_peers() { let (requests_tx, mut requests_rx) = mpsc::channel(100); - let (responses_tx, mut responses_rx) = broadcast::channel(100); + let (responses_tx, responses_rx) = broadcast::channel(100); let (commands_tx, commands_rx) = mpsc::channel(100); let test_data = TestFetchable { data: vec![1, 2, 3], }; + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); let provider = Box::new(TestDataProvider { - peers: vec![ - PeerId::random(), - PeerId::random(), - PeerId::random(), - ], + peers: vec![peer1, peer2, peer3], }); let requested_object = RequestedObject { @@ -359,32 +358,70 @@ async fn test_fetcher_multiple_peers() { // Drop commands_tx to signal no more commands drop(commands_tx); - // verify first request is sent + // Verify first request is sent to peer1 let request = requests_rx.recv().await.unwrap(); - let peer_id = match request { + match request { NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { - // Get first peer from provider - let mut peer_ids = provider.sources().await; - let expected_peer = peer_ids.next().unwrap(); - assert_eq!(pid, expected_peer); + assert_eq!(pid, peer1, "First request should be sent to peer1"); let expected_digest = blake3::hash(&test_data.bytes()).into(); assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); - pid + + // Send failure response from peer1 + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Failure(request_id); + let received = ReceivedObject { + object: response, + sender: peer1, + }; + responses_tx.send(received).unwrap(); } _ => panic!("Expected SendTo request with SyncRequest payload"), }; - // Send partial response - let tx = Transaction::random(32); - let batch = Batch::new(vec![tx]); - let sync_data = SyncData::Batches(vec![batch]); - let request_id = test_data.into_sync_request().digest(); - let response = SyncResponse::Success(request_id, sync_data); - let received = ReceivedObject { - object: response, - sender: peer_id, + // Verify second request is sent to peer2 + let request = requests_rx.recv().await.unwrap(); + match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { + assert_eq!(pid, peer2, "Second request should be sent to peer2"); + let expected_digest = blake3::hash(&test_data.bytes()).into(); + assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); + + // Send failure response from peer2 + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Failure(request_id); + let received = ReceivedObject { + object: response, + sender: peer2, + }; + responses_tx.send(received).unwrap(); + } + _ => panic!("Expected SendTo request with SyncRequest payload"), + }; + + // Verify third request is sent to peer3 + let request = requests_rx.recv().await.unwrap(); + match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { + assert_eq!(pid, peer3, "Third request should be sent to peer3"); + let expected_digest = blake3::hash(&test_data.bytes()).into(); + assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); + + // Send successful response from peer3 + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = 
SyncData::Batches(vec![batch]); + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Success(request_id, sync_data); + let received = ReceivedObject { + object: response, + sender: peer3, + }; + responses_tx.send(received).unwrap(); + } + _ => panic!("Expected SendTo request with SyncRequest payload"), }; - responses_tx.send(received).unwrap(); + // Verify no more requests are sent after successful response tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + assert!(requests_rx.try_recv().is_err(), "No more requests should be sent after successful response"); } \ No newline at end of file diff --git a/src/synchronizer/tests/synchronizer_tests.rs b/src/synchronizer/tests/synchronizer_tests.rs index 4a855d4..3c06565 100644 --- a/src/synchronizer/tests/synchronizer_tests.rs +++ b/src/synchronizer/tests/synchronizer_tests.rs @@ -75,9 +75,11 @@ async fn test_synchronizer_invalid_response_data() { data: vec![1, 2, 3], }; + // Create multiple peers for retry mechanism let peer_id = PeerId::random(); + let peer_id2 = PeerId::random(); let provider = Box::new(MockDataProvider { - peers: vec![peer_id], + peers: vec![peer_id, peer_id2], }); let requested_object = RequestedObject { @@ -85,8 +87,9 @@ async fn test_synchronizer_invalid_response_data() { source: provider, }; - let _handle = Fetcher::spawn( - CancellationToken::new(), + let token = CancellationToken::new(); + let handle = Fetcher::spawn( + token.clone(), network_tx, commands_rx, sync_rx, @@ -98,33 +101,76 @@ async fn test_synchronizer_invalid_response_data() { let request = Box::new(requested_object); commands_tx.send(request).await.unwrap(); - // Drop commands_tx to signal no more commands - drop(commands_tx); - - // verify request is sent - let request = network_rx.recv().await.unwrap(); - match request { + // Verify initial request is sent with timeout + let initial_request = tokio::time::timeout( + tokio::time::Duration::from_secs(5), + network_rx.recv() + ).await.expect("Timed out waiting for initial request") + .expect("Channel closed unexpectedly"); + + match initial_request { NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { assert_eq!(pid, peer_id); let expected_digest = blake3::hash(&mock_data.bytes()).into(); assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); - // Send successful response + // Send invalid response - completely different batch with different digest + let different_data = MockData { + data: vec![4, 5, 6], // Different data + }; + let tx = Transaction::random(32); + let invalid_batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![invalid_batch]); + + // Use the digest from the different data to ensure mismatch + let request_id = different_data.into_sync_request().digest(); + let response = SyncResponse::Success(request_id, sync_data); + + // Send the invalid response + sync_tx.send(ReceivedObject { + object: response, + sender: pid, + }).unwrap(); + } + _ => panic!("Expected initial SendTo request with SyncRequest payload"), + } + + // Give some time for the invalid response to be processed + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + + // Verify retry request is sent with timeout + let retry_request = tokio::time::timeout( + tokio::time::Duration::from_secs(5), + network_rx.recv() + ).await.expect("Timed out waiting for retry request") + .expect("Channel closed unexpectedly"); + + match retry_request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(_)) => { + 
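+            // After rejecting the mismatched digest, the fetcher is expected to
+            // fall back to the provider's next peer rather than retry the peer
+            // that answered incorrectly.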
assert_eq!(pid, peer_id2, "Retry should use the second peer"); + + // Send valid response from second peer to complete the test let tx = Transaction::random(32); let batch = Batch::new(vec![tx]); let sync_data = SyncData::Batches(vec![batch]); let request_id = mock_data.into_sync_request().digest(); let response = SyncResponse::Success(request_id, sync_data); - let received = ReceivedObject { + sync_tx.send(ReceivedObject { object: response, - sender: pid, - }; - sync_tx.send(received).unwrap(); + sender: peer_id2, + }).unwrap(); } - _ => panic!("Expected SendTo request with SyncRequest payload"), - }; - + _ => panic!("Expected retry request with SyncRequest payload"), + } + + // Verify no more requests are sent tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + assert!(network_rx.try_recv().is_err(), "No more requests should be sent after successful response"); + + // Clean shutdown + drop(commands_tx); + token.cancel(); + let _ = handle.await; } #[tokio::test] From 86de8e20e92cc23bf68aa669f74b9ac2d0bc71b6 Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Mon, 13 Jan 2025 10:37:34 +0530 Subject: [PATCH 10/15] syncronizer test refactoring --- src/primary/tests/header_tests.rs | 2 +- src/synchronizer/tests/synchronizer_tests.rs | 495 +++++++++++-------- 2 files changed, 294 insertions(+), 203 deletions(-) diff --git a/src/primary/tests/header_tests.rs b/src/primary/tests/header_tests.rs index 86f7121..bb08b61 100644 --- a/src/primary/tests/header_tests.rs +++ b/src/primary/tests/header_tests.rs @@ -25,7 +25,7 @@ use crate::{ utils::CircularBuffer, }; -// First, let's create test helper functions +// test helper functions impl BlockHeader { #[cfg(test)] pub fn new_test() -> Self { diff --git a/src/synchronizer/tests/synchronizer_tests.rs b/src/synchronizer/tests/synchronizer_tests.rs index 3c06565..46c9152 100644 --- a/src/synchronizer/tests/synchronizer_tests.rs +++ b/src/synchronizer/tests/synchronizer_tests.rs @@ -2,7 +2,7 @@ use crate::{ network::Connect, synchronizer::{ fetcher::Fetcher, - traits::{DataProvider, IntoSyncRequest}, + traits::{DataProvider, IntoSyncRequest, Fetch}, RequestedObject, }, types::{ @@ -18,6 +18,7 @@ use libp2p::PeerId; use std::{collections::HashSet, sync::Arc}; use tokio::sync::{broadcast, mpsc}; use tokio_util::sync::CancellationToken; +use rstest::*; struct MockConnector; @@ -65,25 +66,113 @@ impl DataProvider for MockDataProvider { } } -#[tokio::test] -async fn test_synchronizer_invalid_response_data() { - let (network_tx, mut network_rx) = mpsc::channel(100); - let (commands_tx, commands_rx) = mpsc::channel(100); - let (sync_tx, sync_rx) = broadcast::channel(100); - - let mock_data = MockData { +type TestReceivedObject = ReceivedObject; +type TestRequestedObject = RequestedObject; +type BoxedFetch = Box; + +#[fixture] +fn test_data() -> MockData { + MockData { data: vec![1, 2, 3], + } +} + +#[fixture] +fn test_data_set() -> Vec { + vec![ + MockData { data: vec![1, 2, 3] }, + MockData { data: vec![4, 5, 6] }, + MockData { data: vec![7, 8, 9] }, + ] +} + +#[fixture] +fn channels() -> ( + mpsc::Sender, + mpsc::Receiver, + broadcast::Sender, + broadcast::Receiver, + mpsc::Sender, + mpsc::Receiver, +) { + let (network_tx, network_rx) = mpsc::channel(100); + let (sync_tx, sync_rx) = broadcast::channel(100); + let (commands_tx, commands_rx) = mpsc::channel(100); + (network_tx, network_rx, sync_tx, sync_rx, commands_tx, commands_rx) +} + +#[fixture] +fn peers() -> (PeerId, PeerId, PeerId) { + (PeerId::random(), PeerId::random(), 
PeerId::random()) +} + +#[fixture] +fn fetcher_handle( + channels: ( + mpsc::Sender, + mpsc::Receiver, + broadcast::Sender, + broadcast::Receiver, + mpsc::Sender, + mpsc::Receiver, + ), +) -> (CancellationToken, tokio::task::JoinHandle<()>) { + let (network_tx, _, _, sync_rx, _, commands_rx) = channels; + let token = CancellationToken::new(); + let handle = Fetcher::spawn( + token.clone(), + network_tx, + commands_rx, + sync_rx, + Arc::new(MockConnector), + 10, + ); + (token, handle) +} + +// Helper function to create a valid batch response +fn create_valid_response(request_id: Digest) -> SyncResponse { + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![batch]); + SyncResponse::Success(request_id, sync_data) +} + +// Helper function to create an invalid batch response +fn create_invalid_response() -> SyncResponse { + let different_data = MockData { + data: vec![4, 5, 6], }; + let tx = Transaction::random(32); + let batch = Batch::new(vec![tx]); + let sync_data = SyncData::Batches(vec![batch]); + let request_id = different_data.into_sync_request().digest(); + SyncResponse::Success(request_id, sync_data) +} + +#[rstest] +#[tokio::test] +async fn test_synchronizer_invalid_response_data( + test_data: MockData, + channels: ( + mpsc::Sender, + mpsc::Receiver, + broadcast::Sender, + broadcast::Receiver, + mpsc::Sender, + mpsc::Receiver, + ), + peers: (PeerId, PeerId, PeerId), +) { + let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels; + let (peer_id, peer_id2, _) = peers; - // Create multiple peers for retry mechanism - let peer_id = PeerId::random(); - let peer_id2 = PeerId::random(); let provider = Box::new(MockDataProvider { peers: vec![peer_id, peer_id2], }); - let requested_object = RequestedObject { - object: mock_data.clone(), + let requested_object = TestRequestedObject { + object: test_data.clone(), source: provider, }; @@ -98,8 +187,7 @@ async fn test_synchronizer_invalid_response_data() { ); // Send the request through the commands channel - let request = Box::new(requested_object); - commands_tx.send(request).await.unwrap(); + commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap(); // Verify initial request is sent with timeout let initial_request = tokio::time::timeout( @@ -111,23 +199,12 @@ async fn test_synchronizer_invalid_response_data() { match initial_request { NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { assert_eq!(pid, peer_id); - let expected_digest = blake3::hash(&mock_data.bytes()).into(); + let expected_digest = blake3::hash(&test_data.bytes()).into(); assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); - // Send invalid response - completely different batch with different digest - let different_data = MockData { - data: vec![4, 5, 6], // Different data - }; - let tx = Transaction::random(32); - let invalid_batch = Batch::new(vec![tx]); - let sync_data = SyncData::Batches(vec![invalid_batch]); - - // Use the digest from the different data to ensure mismatch - let request_id = different_data.into_sync_request().digest(); - let response = SyncResponse::Success(request_id, sync_data); - - // Send the invalid response - sync_tx.send(ReceivedObject { + // Send invalid response + let response = create_invalid_response(); + sync_tx.send(TestReceivedObject { object: response, sender: pid, }).unwrap(); @@ -150,12 +227,9 @@ async fn test_synchronizer_invalid_response_data() { assert_eq!(pid, peer_id2, "Retry should use the 
second peer"); // Send valid response from second peer to complete the test - let tx = Transaction::random(32); - let batch = Batch::new(vec![tx]); - let sync_data = SyncData::Batches(vec![batch]); - let request_id = mock_data.into_sync_request().digest(); - let response = SyncResponse::Success(request_id, sync_data); - sync_tx.send(ReceivedObject { + let request_id = test_data.into_sync_request().digest(); + let response = create_valid_response(request_id); + sync_tx.send(TestReceivedObject { object: response, sender: peer_id2, }).unwrap(); @@ -173,27 +247,35 @@ async fn test_synchronizer_invalid_response_data() { let _ = handle.await; } +#[rstest] #[tokio::test] -async fn test_synchronizer_multiple_peers() { - let (network_tx, mut network_rx) = mpsc::channel(100); - let (commands_tx, commands_rx) = mpsc::channel(100); - let (sync_tx, sync_rx) = broadcast::channel(100); - - let mock_data = MockData { - data: vec![1, 2, 3], - }; +async fn test_synchronizer_multiple_peers( + test_data: MockData, + channels: ( + mpsc::Sender, + mpsc::Receiver, + broadcast::Sender, + broadcast::Receiver, + mpsc::Sender, + mpsc::Receiver, + ), + peers: (PeerId, PeerId, PeerId), +) { + let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels; + let (peer1, peer2, peer3) = peers; let provider = Box::new(MockDataProvider { - peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], + peers: vec![peer1, peer2, peer3], }); - let requested_object = RequestedObject { - object: mock_data.clone(), + let requested_object = TestRequestedObject { + object: test_data.clone(), source: provider.clone(), }; - let _handle = Fetcher::spawn( - CancellationToken::new(), + let token = CancellationToken::new(); + let handle = Fetcher::spawn( + token.clone(), network_tx, commands_rx, sync_rx, @@ -202,207 +284,216 @@ async fn test_synchronizer_multiple_peers() { ); // Send the request through the commands channel - let request = Box::new(requested_object); - commands_tx.send(request).await.unwrap(); + commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap(); - // Drop commands_tx to signal no more commands - drop(commands_tx); - - // verify first request is sent + // Verify first request is sent let request = network_rx.recv().await.unwrap(); - let matched_peer_id = match request { + match request { NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => { - let mut peer_ids = provider.sources().await; - let expected_peer = peer_ids.next().unwrap(); - assert_eq!(pid, expected_peer); - let expected_digest = blake3::hash(&mock_data.bytes()).into(); + assert_eq!(pid, peer1); + let expected_digest = blake3::hash(&test_data.bytes()).into(); assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest])); - pid + + // Send successful response + let request_id = test_data.into_sync_request().digest(); + let response = create_valid_response(request_id); + sync_tx.send(TestReceivedObject { + object: response, + sender: peer1, + }).unwrap(); } _ => panic!("Expected SendTo request with SyncRequest payload"), - }; - - // Send successful response - let tx = Transaction::random(32); - let batch = Batch::new(vec![tx]); - let sync_data = SyncData::Batches(vec![batch]); - let request_id = mock_data.into_sync_request().digest(); - let response = SyncResponse::Success(request_id, sync_data); - let received = ReceivedObject { - object: response, - sender: matched_peer_id, - }; - sync_tx.send(received).unwrap(); + } + // Verify no more requests are sent 
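+    // (the brief sleep below gives the fetcher a chance to issue a spurious
+    // follow-up request before the try_recv() check)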
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; -} - -#[tokio::test] -async fn test_fetcher_multiple_objects() { - let (network_tx, mut network_rx) = mpsc::channel(100); - let (commands_tx, commands_rx) = mpsc::channel(100); - let (sync_tx, sync_rx) = broadcast::channel(100); - - let mock_data1 = MockData { - data: vec![1, 2, 3], - }; - let mock_data2 = MockData { - data: vec![4, 5, 6], - }; - - let mock_provider = Box::new(MockDataProvider { - peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], - }); - - let requested_object1 = RequestedObject { - object: mock_data1, - source: mock_provider.clone(), - }; - let requested_object2 = RequestedObject { - object: mock_data2, - source: mock_provider, - }; - - let _handle = Fetcher::spawn( - CancellationToken::new(), - network_tx, - commands_rx, - sync_rx, - Arc::new(MockConnector), - 10, - ); - - // Send requests through the commands channel - commands_tx.send(Box::new(requested_object1)).await.unwrap(); - commands_tx.send(Box::new(requested_object2)).await.unwrap(); + assert!(network_rx.try_recv().is_err(), "No more requests should be sent after successful response"); - // Drop commands_tx to signal no more commands + // Clean shutdown drop(commands_tx); - - // Verify requests are sent - let request1 = network_rx.recv().await.unwrap(); - let request2 = network_rx.recv().await.unwrap(); - - assert!(matches!(request1, NetworkRequest::SendTo(_, _))); - assert!(matches!(request2, NetworkRequest::SendTo(_, _))); + token.cancel(); + let _ = handle.await; } +#[rstest] #[tokio::test] -async fn test_synchronizer_concurrent_requests() { - let (network_tx, mut network_rx) = mpsc::channel(100); - let (commands_tx, commands_rx) = mpsc::channel(100); - let (sync_tx, sync_rx) = broadcast::channel(100); - - let mock_data1 = MockData { - data: vec![1, 2, 3], - }; - let mock_data2 = MockData { - data: vec![4, 5, 6], - }; - let mock_data3 = MockData { - data: vec![7, 8, 9], - }; - - let provider1 = Box::new(MockDataProvider { - peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], - }); - let provider2 = Box::new(MockDataProvider { - peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], - }); - let provider3 = Box::new(MockDataProvider { - peers: vec![PeerId::random(), PeerId::random(), PeerId::random()], - }); +async fn test_synchronizer_concurrent_requests( + test_data_set: Vec, + channels: ( + mpsc::Sender, + mpsc::Receiver, + broadcast::Sender, + broadcast::Receiver, + mpsc::Sender, + mpsc::Receiver, + ), + peers: (PeerId, PeerId, PeerId), +) { + let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels; + let (peer1, peer2, peer3) = peers; - let _handle = Fetcher::spawn( - CancellationToken::new(), + let token = CancellationToken::new(); + let handle = Fetcher::spawn( + token.clone(), network_tx, commands_rx, sync_rx, Arc::new(MockConnector), 10, ); - - // Send requests through the commands channel - commands_tx.send(Box::new(RequestedObject { - object: mock_data1.clone(), - source: provider1, - })).await.unwrap(); - commands_tx.send(Box::new(RequestedObject { - object: mock_data2.clone(), - source: provider2, - })).await.unwrap(); - commands_tx.send(Box::new(RequestedObject { - object: mock_data3.clone(), - source: provider3, - })).await.unwrap(); - // Drop commands_tx to signal no more commands - drop(commands_tx); + // Send multiple requests concurrently and track their digests + let mut expected_digests = HashSet::new(); + for test_data in test_data_set.iter() { + 
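+        // Each object gets its own provider; request ordering is not
+        // guaranteed, so requests are matched back to their objects by digest
+        // further down.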
let provider = Box::new(MockDataProvider { + peers: vec![peer1, peer2, peer3], + }); + + let requested_object = TestRequestedObject { + object: test_data.clone(), + source: provider, + }; + + // Store the expected digest + let sync_req = test_data.into_sync_request(); + if let SyncRequest::Batches(digests) = sync_req { + expected_digests.insert(digests[0]); + } + + commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap(); + } - let mut received_requests = HashSet::new(); - for _ in 0..3 { - if let NetworkRequest::SendTo(peer_id, _) = network_rx.recv().await.unwrap() { - received_requests.insert(peer_id); + // Verify all requests are processed + let mut received_digests = HashSet::new(); + for _ in 0..test_data_set.len() { + let request = network_rx.recv().await.unwrap(); + match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(SyncRequest::Batches(digests))) => { + let digest = digests[0]; + assert!( + expected_digests.contains(&digest), + "Received unexpected request digest: {:?}, expected one of: {:?}", + digest, + expected_digests + ); + received_digests.insert(digest); + + // Send successful response + let response = create_valid_response(digest); + sync_tx.send(TestReceivedObject { + object: response, + sender: pid, + }).unwrap(); + } + _ => panic!("Expected SendTo request with SyncRequest payload"), } } - assert_eq!(received_requests.len(), 3); + // Verify we received requests for all expected digests + assert_eq!( + received_digests.len(), + expected_digests.len(), + "Should receive requests for all test data" + ); + assert!( + received_digests.is_subset(&expected_digests), + "All received digests should be from our requests" + ); + assert!( + expected_digests.is_subset(&received_digests), + "All expected digests should be requested" + ); + + // Verify no more requests are sent + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + assert!(network_rx.try_recv().is_err(), "No more requests should be sent after all responses"); + + // Clean shutdown + drop(commands_tx); + token.cancel(); + let _ = handle.await; } +#[rstest] #[tokio::test] -async fn test_synchronizer_retry_on_failure() { - let (network_tx, mut network_rx) = mpsc::channel(100); - let (commands_tx, commands_rx) = mpsc::channel(100); - let (sync_tx, sync_rx) = broadcast::channel(100); - - let mock_data = MockData { - data: vec![1, 2, 3], - }; +async fn test_synchronizer_retry_on_failure( + test_data: MockData, + channels: ( + mpsc::Sender, + mpsc::Receiver, + broadcast::Sender, + broadcast::Receiver, + mpsc::Sender, + mpsc::Receiver, + ), + peers: (PeerId, PeerId, PeerId), +) { + let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels; + let (peer1, peer2, _) = peers; let provider = Box::new(MockDataProvider { - peers: vec![PeerId::random(), PeerId::random()], + peers: vec![peer1, peer2], }); - let requested_object = RequestedObject { - object: mock_data.clone(), + let requested_object = TestRequestedObject { + object: test_data.clone(), source: provider, }; - let _handle = Fetcher::spawn( - CancellationToken::new(), + let token = CancellationToken::new(); + let handle = Fetcher::spawn( + token.clone(), network_tx, commands_rx, sync_rx, Arc::new(MockConnector), 10, ); - - // Send request through the commands channel - commands_tx.send(Box::new(requested_object)).await.unwrap(); - // Drop commands_tx to signal no more commands - drop(commands_tx); + // Send the request through the commands channel + 
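+    // (the `as BoxedFetch` cast erases the concrete RequestedObject type into a
+    // boxed trait object; dropping the channel at the end of the test signals
+    // that no more commands follow)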
commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap(); - let request1 = network_rx.recv().await.unwrap(); - match request1 { - NetworkRequest::SendTo(peer_id, _) => { - let response = SyncResponse::Failure(mock_data.into_sync_request().digest()); - sync_tx.send(ReceivedObject { + // Verify first request is sent + let request = network_rx.recv().await.unwrap(); + match request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(_)) => { + assert_eq!(pid, peer1, "First request should be sent to peer1"); + + // Send failure response + let request_id = test_data.into_sync_request().digest(); + let response = SyncResponse::Failure(request_id); + sync_tx.send(TestReceivedObject { object: response, - sender: peer_id, + sender: peer1, }).unwrap(); } - _ => panic!("Expected SendTo request"), + _ => panic!("Expected SendTo request with SyncRequest payload"), } - let request2 = network_rx.recv().await.unwrap(); - match request2 { - NetworkRequest::SendTo(peer_id1, _) => { - match request1 { - NetworkRequest::SendTo(peer_id2, _) => assert_ne!(peer_id1, peer_id2), - _ => panic!("Expected SendTo request"), - } + // Verify retry request is sent to second peer + let retry_request = network_rx.recv().await.unwrap(); + match retry_request { + NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(_)) => { + assert_eq!(pid, peer2, "Retry should be sent to peer2"); + assert_ne!(pid, peer1, "Retry should use a different peer"); + + // Send successful response from second peer + let request_id = test_data.into_sync_request().digest(); + let response = create_valid_response(request_id); + sync_tx.send(TestReceivedObject { + object: response, + sender: peer2, + }).unwrap(); } - _ => panic!("Expected SendTo request"), + _ => panic!("Expected retry request with SyncRequest payload"), } + + // Verify no more requests are sent + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + assert!(network_rx.try_recv().is_err(), "No more requests should be sent after successful response"); + + // Clean shutdown + drop(commands_tx); + token.cancel(); + let _ = handle.await; } \ No newline at end of file From ace544a2aacd191ca2f12101f2d7cc80d94ff304 Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Mon, 13 Jan 2025 10:40:44 +0530 Subject: [PATCH 11/15] dag nit --- src/types/dag.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/types/dag.rs b/src/types/dag.rs index 9008288..c14db86 100644 --- a/src/types/dag.rs +++ b/src/types/dag.rs @@ -57,14 +57,14 @@ where } /// Check if a vertex has all its parents in the DAG, returning an error containing the missing parents if not. 
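+    /// Only base-layer vertices may be parentless; every other vertex must
+    /// already have its parents present in earlier layers.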
pub fn check_parents(&self, vertex: &Vertex) -> Result<(), DagError> { - // Base layer (0) and layer 1 vertices don't require parents - if vertex.layer <= self.base_layer || vertex.layer == 1 { + // Only base layer vertices can be parentless + if vertex.layer == self.base_layer { if vertex.parents.is_empty() { return Ok(()); } } - // For other layers, check if all parents exist in any previous layer + // All other vertices must have valid parents in previous layers let found_parents: HashSet<_> = (self.base_layer..vertex.layer) .rev() .flat_map(|layer| self.vertices_by_layers.get(&layer).into_iter().flatten()) From f0940dad3ca579445fad9ffa6615318d55c9b770 Mon Sep 17 00:00:00 2001 From: Harsh Pratap Singh Date: Mon, 13 Jan 2025 13:56:39 +0530 Subject: [PATCH 12/15] fmt nit --- src/primary/header_elector.rs | 16 +- src/primary/tests/header_tests.rs | 24 +- src/settings/parser.rs | 11 +- src/synchronizer/mod.rs | 2 +- src/synchronizer/tests/fetcher_tests.rs | 110 +++++---- src/synchronizer/tests/synchronizer_tests.rs | 228 ++++++++++++------- src/types/tests/dag_tests.rs | 60 ++--- 7 files changed, 253 insertions(+), 198 deletions(-) diff --git a/src/primary/header_elector.rs b/src/primary/header_elector.rs index 53ae7ef..66e0c5c 100644 --- a/src/primary/header_elector.rs +++ b/src/primary/header_elector.rs @@ -152,9 +152,7 @@ mod test { use crate::{ db::{Column, Db}, - primary::test_utils::fixtures::{ - random_digests, CHANNEL_CAPACITY, - }, + primary::test_utils::fixtures::{random_digests, CHANNEL_CAPACITY}, settings::parser::Committee, types::{ block_header::BlockHeader, @@ -187,20 +185,20 @@ mod test { let (round_tx, round_rx) = watch::channel((0, HashSet::new())); let (incomplete_headers_tx, incomplete_headers_rx) = mpsc::channel(CHANNEL_CAPACITY); let (network_tx, network_rx) = mpsc::channel(CHANNEL_CAPACITY); - + // Create a temporary directory for the test database let temp_dir = tempdir().unwrap(); let db_path = temp_dir.path().join("test.db"); - + let db = Arc::new(Db::new(db_path).unwrap()); - + let validator_keypair = ed25519::Keypair::generate(); let token = CancellationToken::new(); let db_clone = db.clone(); let token_clone = token.clone(); - + let committee = Committee::new_test(); - + tokio::spawn(async move { HeaderElector::spawn( token_clone, @@ -215,7 +213,7 @@ mod test { .await .unwrap() }); - + ( headers_tx, round_tx, diff --git a/src/primary/tests/header_tests.rs b/src/primary/tests/header_tests.rs index bb08b61..3f4b990 100644 --- a/src/primary/tests/header_tests.rs +++ b/src/primary/tests/header_tests.rs @@ -16,9 +16,9 @@ use crate::{ block_header::BlockHeader, certificate::Certificate, network::{NetworkRequest, ReceivedObject}, + signing::Signable, sync::SyncStatus, traits::AsBytes, - signing::Signable, vote::Vote, Round, }, @@ -34,7 +34,7 @@ impl BlockHeader { let peer_bytes = peer_id.to_bytes(); let hash = blake3::hash(&peer_bytes); author.copy_from_slice(hash.as_bytes()); - + Self { round: 1, author, @@ -52,7 +52,7 @@ impl Vote { let authority = keypair.public().to_bytes(); let header = BlockHeader::new_test(); let signature = header.sign(&keypair).unwrap(); - + Self { authority, signature, @@ -63,7 +63,7 @@ impl Vote { pub fn new_test_invalid() -> Self { let keypair = Keypair::generate(); let authority = keypair.public().to_bytes(); - + Self { authority, signature: vec![0; 32], @@ -159,7 +159,7 @@ async fn test_header_builder_sync_status() { let committee = Committee::new_test(); let (sync_status_tx, sync_status_rx) = watch::channel(SyncStatus::Incomplete); - // 
+    // Spawn header builder returning a JoinHandle
     let handle = HeaderBuilder::spawn(
         CancellationToken::new(),
         network_tx,
@@ -176,7 +176,7 @@ async fn test_header_builder_sync_status() {
     // Test that header builder waits for sync to complete
     sleep(Duration::from_millis(100)).await;
     sync_status_tx.send(SyncStatus::Complete).unwrap();
-
+
     handle.abort();
 }

@@ -309,10 +309,12 @@ async fn test_header_builder_invalid_votes() {
     // send invalid votes
     let invalid_vote = Vote::new_test_invalid();
-    votes_tx.send(ReceivedObject {
-        object: invalid_vote,
-        sender: PeerId::random(),
-    }).unwrap();
+    votes_tx
+        .send(ReceivedObject {
+            object: invalid_vote,
+            sender: PeerId::random(),
+        })
+        .unwrap();

     sleep(Duration::from_millis(10)).await;
     handle.abort();
@@ -361,4 +363,4 @@ async fn test_header_builder_digest_buffer() {
     }

     handle.abort();
-}
\ No newline at end of file
+}

diff --git a/src/settings/parser.rs b/src/settings/parser.rs
index dfb7c35..ad4d540 100644
--- a/src/settings/parser.rs
+++ b/src/settings/parser.rs
@@ -1,4 +1,7 @@
-use libp2p::{identity::{self, ed25519}, PeerId};
+use libp2p::{
+    identity::{self, ed25519},
+    PeerId,
+};
 use serde::{Deserialize, Serialize};
 use std::path::Path;

@@ -45,13 +48,13 @@ impl Committee {
     #[cfg(test)]
     pub fn new_test() -> Self {
         let mut authorities = Vec::new();
-
+
         // Add three test authorities
         for i in 0..3 {
             let keypair = ed25519::Keypair::generate();
             let public_key = identity::PublicKey::from(keypair.public());
             let peer_id = PeerId::from_public_key(&public_key);
-
+
             let authority = AuthorityInfo {
                 authority_id: peer_id,
                 authority_pubkey: hex::encode(keypair.public().to_bytes()),
@@ -61,7 +64,7 @@ impl Committee {
             };
             authorities.push(authority);
         }
-
+
         Committee { authorities }
     }
 }

diff --git a/src/synchronizer/mod.rs b/src/synchronizer/mod.rs
index bd9ed4f..6adcaa0 100644
--- a/src/synchronizer/mod.rs
+++ b/src/synchronizer/mod.rs
@@ -213,6 +213,6 @@ pub enum FetchError {

 #[cfg(test)]
 mod tests {
-    mod synchronizer_tests;
     mod fetcher_tests;
+    mod synchronizer_tests;
 }

diff --git a/src/synchronizer/tests/fetcher_tests.rs b/src/synchronizer/tests/fetcher_tests.rs
index 2343988..0959368 100644
--- a/src/synchronizer/tests/fetcher_tests.rs
+++ b/src/synchronizer/tests/fetcher_tests.rs
@@ -1,7 +1,7 @@
+use rand::random;
 use std::{collections::HashSet, sync::Arc};
 use tokio::sync::{broadcast, mpsc};
 use tokio_util::sync::CancellationToken;
-use rand::random;

 use crate::{
     network::Connect,
@@ -11,16 +11,18 @@ use crate::{
         RequestedObject,
     },
     types::{
-        network::{NetworkRequest, ReceivedObject, RequestPayload, SyncRequest, SyncResponse, SyncData},
-        traits::{AsBytes, Hash, Random},
-        Digest,
         batch::{Batch, BatchId},
+        network::{
+            NetworkRequest, ReceivedObject, RequestPayload, SyncData, SyncRequest, SyncResponse,
+        },
+        traits::{AsBytes, Hash, Random},
         transaction::Transaction,
+        Digest,
     },
 };

-use libp2p::PeerId;
 use async_trait::async_trait;
+use libp2p::PeerId;

@@ -155,7 +157,7 @@ async fn test_fetcher_empty() {
     let (network_tx, _) = mpsc::channel(100);
     let (_, sync_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);
-
+
     let _handle = Fetcher::spawn(
         CancellationToken::new(),
         network_tx,
@@ -164,7 +166,7 @@ async fn test_fetcher_empty() {
         Arc::new(MockConnector),
         10,
     );
-
+
     // Drop commands_tx to signal no more commands
     drop(commands_tx);
 }

@@ -174,17 +176,17 @@ async fn test_fetcher_single_request() {
     let (network_tx, mut network_rx) = mpsc::channel(100);
     let (sync_tx, sync_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);
-
+
     let test_data = TestFetchable {
         data: vec![1, 2, 3],
     };
-
+
     let peer_id = PeerId::random();
     let request = Box::new(RequestedObject {
         object: test_data.clone(),
         source: Box::new(peer_id),
     });
-
+
     let _handle = Fetcher::spawn(
         CancellationToken::new(),
         network_tx.clone(),
@@ -193,13 +195,13 @@ async fn test_fetcher_single_request() {
         Arc::new(MockConnector),
         10,
     );
-
+
     // Send the request through the commands channel
     commands_tx.send(request).await.unwrap();
-
+
     // Drop commands_tx to signal no more commands
     drop(commands_tx);
-
+
     // verify request is sent
     let request = network_rx.recv().await.unwrap();
     match request {
@@ -210,7 +212,7 @@ async fn test_fetcher_single_request() {
         }
         _ => panic!("Expected SendTo request"),
     }
-
+
     // Send successful response
     let tx = Transaction::random(32);
     let batch = Batch::new(vec![tx]);
@@ -222,7 +224,7 @@ async fn test_fetcher_single_request() {
         sender: peer_id,
     };
     sync_tx.send(received).unwrap();
-
+
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
 }

@@ -231,22 +233,20 @@ async fn test_fetcher_timeout() {
     let (requests_tx, _) = mpsc::channel(100);
     let (responses_tx, mut responses_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);
-
+
     let test_data = TestFetchable {
         data: vec![1, 2, 3],
     };
-
+
     let provider = Box::new(TestDataProvider {
-        peers: vec![
-            PeerId::random()
-        ],
+        peers: vec![PeerId::random()],
     });
-
+
     let requested_object = RequestedObject {
         object: test_data,
         source: provider,
     };
-
+
     let handle = Fetcher::spawn(
         CancellationToken::new(),
         requests_tx,
@@ -256,19 +256,16 @@ async fn test_fetcher_timeout() {
         // Set a very short timeout for individual fetch operations
         1,
     );
-
+
     // Send the request through the commands channel
     commands_tx.send(Box::new(requested_object)).await.unwrap();
-
+
     // Drop commands_tx to signal no more commands
     drop(commands_tx);
-
+
     // Run fetcher with a longer timeout to ensure it has time to process
-    let result = tokio::time::timeout(
-        tokio::time::Duration::from_millis(1000),
-        handle
-    ).await;
-
+    let result = tokio::time::timeout(tokio::time::Duration::from_millis(1000), handle).await;
+
     // The fetcher should complete successfully, but the fetch operation should have timed out
     assert!(result.is_ok(), "Fetcher should complete");
 }

@@ -278,22 +275,20 @@ async fn test_fetcher_error_response() {
     let (requests_tx, _) = mpsc::channel(100);
     let (responses_tx, mut responses_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);
-
+
     let test_data = TestFetchable {
         data: vec![1, 2, 3],
     };
-
+
     let provider = Box::new(TestDataProvider {
-        peers: vec![
-            PeerId::random()
-        ],
+        peers: vec![PeerId::random()],
     });
-
+
     let requested_object = RequestedObject {
         object: test_data.clone(),
         source: provider,
     };
-
+
     let _handle = Fetcher::spawn(
         CancellationToken::new(),
         requests_tx,
@@ -302,13 +297,13 @@ async fn test_fetcher_error_response() {
         Arc::new(MockConnector),
         10,
     );
-
+
     // Send the request through the commands channel
     commands_tx.send(Box::new(requested_object)).await.unwrap();
-
+
     // Drop commands_tx to signal no more commands
     drop(commands_tx);
-
+
     // Send failure response
     let request_id = test_data.into_sync_request().digest();
     let response = SyncResponse::Failure(request_id);
@@ -317,7 +312,7 @@ async fn test_fetcher_error_response() {
         sender: PeerId::random(),
     };
     responses_tx.send(received).unwrap();
-
+
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
 }

@@ -326,23 +321,23 @@ async fn test_fetcher_multiple_peers() {
     let (requests_tx, mut requests_rx) = mpsc::channel(100);
     let (responses_tx, responses_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);
-
+
     let test_data = TestFetchable {
         data: vec![1, 2, 3],
     };
-
+
     let peer1 = PeerId::random();
     let peer2 = PeerId::random();
     let peer3 = PeerId::random();
     let provider = Box::new(TestDataProvider {
         peers: vec![peer1, peer2, peer3],
     });
-
+
     let requested_object = RequestedObject {
         object: test_data.clone(),
         source: provider.clone(),
     };
-
+
     let _handle = Fetcher::spawn(
         CancellationToken::new(),
         requests_tx,
@@ -351,13 +346,13 @@ async fn test_fetcher_multiple_peers() {
         Arc::new(MockConnector),
         10,
     );
-
+
     // Send the request through the commands channel
     commands_tx.send(Box::new(requested_object)).await.unwrap();
-
+
     // Drop commands_tx to signal no more commands
     drop(commands_tx);
-
+
     // Verify first request is sent to peer1
     let request = requests_rx.recv().await.unwrap();
     match request {
@@ -365,7 +360,7 @@ async fn test_fetcher_multiple_peers() {
             assert_eq!(pid, peer1, "First request should be sent to peer1");
             let expected_digest = blake3::hash(&test_data.bytes()).into();
             assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest]));
-
+
             // Send failure response from peer1
             let request_id = test_data.into_sync_request().digest();
             let response = SyncResponse::Failure(request_id);
@@ -377,7 +372,7 @@ async fn test_fetcher_multiple_peers() {
         }
         _ => panic!("Expected SendTo request with SyncRequest payload"),
     };
-
+
     // Verify second request is sent to peer2
     let request = requests_rx.recv().await.unwrap();
     match request {
@@ -385,7 +380,7 @@ async fn test_fetcher_multiple_peers() {
             assert_eq!(pid, peer2, "Second request should be sent to peer2");
             let expected_digest = blake3::hash(&test_data.bytes()).into();
             assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest]));
-
+
             // Send failure response from peer2
             let request_id = test_data.into_sync_request().digest();
             let response = SyncResponse::Failure(request_id);
@@ -397,7 +392,7 @@ async fn test_fetcher_multiple_peers() {
         }
         _ => panic!("Expected SendTo request with SyncRequest payload"),
     };
-
+
     // Verify third request is sent to peer3
     let request = requests_rx.recv().await.unwrap();
     match request {
@@ -405,7 +400,7 @@ async fn test_fetcher_multiple_peers() {
             assert_eq!(pid, peer3, "Third request should be sent to peer3");
             let expected_digest = blake3::hash(&test_data.bytes()).into();
             assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest]));
-
+
             // Send successful response from peer3
             let tx = Transaction::random(32);
             let batch = Batch::new(vec![tx]);
@@ -420,8 +415,11 @@ async fn test_fetcher_multiple_peers() {
         }
         _ => panic!("Expected SendTo request with SyncRequest payload"),
     };
-
+
     // Verify no more requests are sent after successful response
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
-    assert!(requests_rx.try_recv().is_err(), "No more requests should be sent after successful response");
-}
\ No newline at end of file
+    assert!(
+        requests_rx.try_recv().is_err(),
+        "No more requests should be sent after successful response"
+    );
+}

diff --git a/src/synchronizer/tests/synchronizer_tests.rs b/src/synchronizer/tests/synchronizer_tests.rs
index 46c9152..af5b6b8 100644
--- a/src/synchronizer/tests/synchronizer_tests.rs
+++ b/src/synchronizer/tests/synchronizer_tests.rs
@@ -2,12 +2,14 @@ use crate::{
     network::Connect,
     synchronizer::{
         fetcher::Fetcher,
-        traits::{DataProvider, IntoSyncRequest, Fetch},
+        traits::{DataProvider, Fetch, IntoSyncRequest},
         RequestedObject,
     },
     types::{
         batch::{Batch, BatchId},
-        network::{NetworkRequest, ReceivedObject, RequestPayload, SyncData, SyncRequest, SyncResponse},
+        network::{
+            NetworkRequest, ReceivedObject, RequestPayload, SyncData, SyncRequest, SyncResponse,
+        },
         traits::{AsBytes, Hash, Random},
         transaction::Transaction,
         Digest,
@@ -15,10 +17,10 @@ use crate::{
 };
 use async_trait::async_trait;
 use libp2p::PeerId;
+use rstest::*;
 use std::{collections::HashSet, sync::Arc};
 use tokio::sync::{broadcast, mpsc};
 use tokio_util::sync::CancellationToken;
-use rstest::*;

 struct MockConnector;

@@ -80,9 +82,15 @@ fn test_data() -> MockData {
 #[fixture]
 fn test_data_set() -> Vec<MockData> {
     vec![
-        MockData { data: vec![1, 2, 3] },
-        MockData { data: vec![4, 5, 6] },
-        MockData { data: vec![7, 8, 9] },
+        MockData {
+            data: vec![1, 2, 3],
+        },
+        MockData {
+            data: vec![4, 5, 6],
+        },
+        MockData {
+            data: vec![7, 8, 9],
+        },
     ]
 }

@@ -98,7 +106,14 @@ fn channels() -> (
     let (network_tx, network_rx) = mpsc::channel(100);
     let (sync_tx, sync_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);
-    (network_tx, network_rx, sync_tx, sync_rx, commands_tx, commands_rx)
+    (
+        network_tx,
+        network_rx,
+        sync_tx,
+        sync_rx,
+        commands_tx,
+        commands_rx,
+    )
 }

 #[fixture]
@@ -166,16 +181,16 @@ async fn test_synchronizer_invalid_response_data(
 ) {
     let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels;
     let (peer_id, peer_id2, _) = peers;
-
+
     let provider = Box::new(MockDataProvider {
         peers: vec![peer_id, peer_id2],
     });
-
+
     let requested_object = TestRequestedObject {
         object: test_data.clone(),
         source: provider,
     };
-
+
     let token = CancellationToken::new();
     let handle = Fetcher::spawn(
         token.clone(),
@@ -185,29 +200,34 @@ async fn test_synchronizer_invalid_response_data(
         Arc::new(MockConnector),
         10,
     );
-
+
     // Send the request through the commands channel
-    commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap();
-
+    commands_tx
+        .send(Box::new(requested_object) as BoxedFetch)
+        .await
+        .unwrap();
+
     // Verify initial request is sent with timeout
-    let initial_request = tokio::time::timeout(
-        tokio::time::Duration::from_secs(5),
-        network_rx.recv()
-    ).await.expect("Timed out waiting for initial request")
-    .expect("Channel closed unexpectedly");
+    let initial_request =
+        tokio::time::timeout(tokio::time::Duration::from_secs(5), network_rx.recv())
+            .await
+            .expect("Timed out waiting for initial request")
+            .expect("Channel closed unexpectedly");

     match initial_request {
         NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(sync_req)) => {
             assert_eq!(pid, peer_id);
             let expected_digest = blake3::hash(&test_data.bytes()).into();
             assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest]));
-
+
             // Send invalid response
             let response = create_invalid_response();
-            sync_tx.send(TestReceivedObject {
-                object: response,
-                sender: pid,
-            }).unwrap();
+            sync_tx
+                .send(TestReceivedObject {
+                    object: response,
+                    sender: pid,
+                })
+                .unwrap();
         }
         _ => panic!("Expected initial SendTo request with SyncRequest payload"),
     }
@@ -216,31 +236,36 @@ async fn test_synchronizer_invalid_response_data(
     tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;

     // Verify retry request is sent with timeout
-    let retry_request = tokio::time::timeout(
-        tokio::time::Duration::from_secs(5),
-        network_rx.recv()
-    ).await.expect("Timed out waiting for retry request")
-    .expect("Channel closed unexpectedly");
+    let retry_request =
+        tokio::time::timeout(tokio::time::Duration::from_secs(5), network_rx.recv())
+            .await
+            .expect("Timed out waiting for retry request")
+            .expect("Channel closed unexpectedly");

     match retry_request {
         NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(_)) => {
             assert_eq!(pid, peer_id2, "Retry should use the second peer");
-
+
             // Send valid response from second peer to complete the test
             let request_id = test_data.into_sync_request().digest();
             let response = create_valid_response(request_id);
-            sync_tx.send(TestReceivedObject {
-                object: response,
-                sender: peer_id2,
-            }).unwrap();
+            sync_tx
+                .send(TestReceivedObject {
+                    object: response,
+                    sender: peer_id2,
+                })
+                .unwrap();
         }
         _ => panic!("Expected retry request with SyncRequest payload"),
     }

     // Verify no more requests are sent
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
-    assert!(network_rx.try_recv().is_err(), "No more requests should be sent after successful response");
-
+    assert!(
+        network_rx.try_recv().is_err(),
+        "No more requests should be sent after successful response"
+    );
+
     // Clean shutdown
     drop(commands_tx);
     token.cancel();
@@ -263,16 +288,16 @@ async fn test_synchronizer_multiple_peers(
 ) {
     let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels;
     let (peer1, peer2, peer3) = peers;
-
+
     let provider = Box::new(MockDataProvider {
         peers: vec![peer1, peer2, peer3],
     });
-
+
     let requested_object = TestRequestedObject {
         object: test_data.clone(),
         source: provider.clone(),
     };
-
+
     let token = CancellationToken::new();
     let handle = Fetcher::spawn(
         token.clone(),
@@ -282,10 +307,13 @@ async fn test_synchronizer_multiple_peers(
         Arc::new(MockConnector),
         10,
     );
-
+
     // Send the request through the commands channel
-    commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap();
-
+    commands_tx
+        .send(Box::new(requested_object) as BoxedFetch)
+        .await
+        .unwrap();
+
     // Verify first request is sent
     let request = network_rx.recv().await.unwrap();
     match request {
@@ -293,22 +321,27 @@ async fn test_synchronizer_multiple_peers(
             assert_eq!(pid, peer1);
             let expected_digest = blake3::hash(&test_data.bytes()).into();
             assert_eq!(sync_req, SyncRequest::Batches(vec![expected_digest]));
-
+
             // Send successful response
             let request_id = test_data.into_sync_request().digest();
             let response = create_valid_response(request_id);
-            sync_tx.send(TestReceivedObject {
-                object: response,
-                sender: peer1,
-            }).unwrap();
+            sync_tx
+                .send(TestReceivedObject {
+                    object: response,
+                    sender: peer1,
+                })
+                .unwrap();
         }
         _ => panic!("Expected SendTo request with SyncRequest payload"),
     }
-
+
     // Verify no more requests are sent
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
-    assert!(network_rx.try_recv().is_err(), "No more requests should be sent after successful response");
-
+    assert!(
+        network_rx.try_recv().is_err(),
+        "No more requests should be sent after successful response"
+    );
+
     // Clean shutdown
     drop(commands_tx);
     token.cancel();
@@ -331,7 +364,7 @@ async fn test_synchronizer_concurrent_requests(
 ) {
     let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels;
     let (peer1, peer2, peer3) = peers;
-
+
     let token = CancellationToken::new();
     let handle = Fetcher::spawn(
         token.clone(),
@@ -341,34 +374,40 @@ async fn test_synchronizer_concurrent_requests(
         Arc::new(MockConnector),
         10,
     );
-
+
     // Send multiple requests concurrently and track their digests
     let mut expected_digests = HashSet::new();
     for test_data in test_data_set.iter() {
         let provider = Box::new(MockDataProvider {
             peers: vec![peer1, peer2, peer3],
         });
-
+
         let requested_object = TestRequestedObject {
             object: test_data.clone(),
             source: provider,
         };
-
+
         // Store the expected digest
         let sync_req = test_data.into_sync_request();
         if let SyncRequest::Batches(digests) = sync_req {
             expected_digests.insert(digests[0]);
         }
-
-        commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap();
+
+        commands_tx
+            .send(Box::new(requested_object) as BoxedFetch)
+            .await
+            .unwrap();
     }
-
+
     // Verify all requests are processed
     let mut received_digests = HashSet::new();
     for _ in 0..test_data_set.len() {
         let request = network_rx.recv().await.unwrap();
         match request {
-            NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(SyncRequest::Batches(digests))) => {
+            NetworkRequest::SendTo(
+                pid,
+                RequestPayload::SyncRequest(SyncRequest::Batches(digests)),
+            ) => {
                 let digest = digests[0];
                 assert!(
                     expected_digests.contains(&digest),
@@ -377,18 +416,20 @@ async fn test_synchronizer_concurrent_requests(
                     expected_digests
                 );
                 received_digests.insert(digest);
-
+
                 // Send successful response
                 let response = create_valid_response(digest);
-                sync_tx.send(TestReceivedObject {
-                    object: response,
-                    sender: pid,
-                }).unwrap();
+                sync_tx
+                    .send(TestReceivedObject {
+                        object: response,
+                        sender: pid,
+                    })
+                    .unwrap();
             }
             _ => panic!("Expected SendTo request with SyncRequest payload"),
         }
     }
-
+
     // Verify we received requests for all expected digests
     assert_eq!(
         received_digests.len(),
@@ -403,11 +444,14 @@ async fn test_synchronizer_concurrent_requests(
         expected_digests.is_subset(&received_digests),
         "All expected digests should be requested"
     );
-
+
     // Verify no more requests are sent
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
-    assert!(network_rx.try_recv().is_err(), "No more requests should be sent after all responses");
-
+    assert!(
+        network_rx.try_recv().is_err(),
+        "No more requests should be sent after all responses"
+    );
+
     // Clean shutdown
     drop(commands_tx);
     token.cancel();
@@ -430,16 +474,16 @@ async fn test_synchronizer_retry_on_failure(
 ) {
     let (network_tx, mut network_rx, sync_tx, sync_rx, commands_tx, commands_rx) = channels;
     let (peer1, peer2, _) = peers;
-
+
     let provider = Box::new(MockDataProvider {
         peers: vec![peer1, peer2],
     });
-
+
     let requested_object = TestRequestedObject {
         object: test_data.clone(),
         source: provider,
     };
-
+
     let token = CancellationToken::new();
     let handle = Fetcher::spawn(
         token.clone(),
@@ -449,51 +493,61 @@ async fn test_synchronizer_retry_on_failure(
         Arc::new(MockConnector),
         10,
     );
-
+
     // Send the request through the commands channel
-    commands_tx.send(Box::new(requested_object) as BoxedFetch).await.unwrap();
-
+    commands_tx
+        .send(Box::new(requested_object) as BoxedFetch)
+        .await
+        .unwrap();
+
     // Verify first request is sent
     let request = network_rx.recv().await.unwrap();
     match request {
         NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(_)) => {
             assert_eq!(pid, peer1, "First request should be sent to peer1");
-
+
             // Send failure response
             let request_id = test_data.into_sync_request().digest();
             let response = SyncResponse::Failure(request_id);
-            sync_tx.send(TestReceivedObject {
-                object: response,
-                sender: peer1,
-            }).unwrap();
+            sync_tx
+                .send(TestReceivedObject {
+                    object: response,
+                    sender: peer1,
+                })
+                .unwrap();
         }
         _ => panic!("Expected SendTo request with SyncRequest payload"),
     }
-
+
     // Verify retry request is sent to second peer
     let retry_request = network_rx.recv().await.unwrap();
     match retry_request {
         NetworkRequest::SendTo(pid, RequestPayload::SyncRequest(_)) => {
             assert_eq!(pid, peer2, "Retry should be sent to peer2");
             assert_ne!(pid, peer1, "Retry should use a different peer");
-
+
             // Send successful response from second peer
             let request_id = test_data.into_sync_request().digest();
             let response = create_valid_response(request_id);
-            sync_tx.send(TestReceivedObject {
-                object: response,
-                sender: peer2,
-            }).unwrap();
+            sync_tx
+                .send(TestReceivedObject {
+                    object: response,
+                    sender: peer2,
+                })
+                .unwrap();
         }
         _ => panic!("Expected retry request with SyncRequest payload"),
     }
-
+
     // Verify no more requests are sent
     tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
-    assert!(network_rx.try_recv().is_err(), "No more requests should be sent after successful response");
-
+    assert!(
+        network_rx.try_recv().is_err(),
+        "No more requests should be sent after successful response"
+    );
+
     // Clean shutdown
     drop(commands_tx);
     token.cancel();
     let _ = handle.await;
-}
\ No newline at end of file
+}

diff --git a/src/types/tests/dag_tests.rs b/src/types/tests/dag_tests.rs
index 3356fab..e319734 100644
--- a/src/types/tests/dag_tests.rs
+++ b/src/types/tests/dag_tests.rs
@@ -21,21 +21,21 @@ impl AsBytes for TestData {
 async fn test_dag_creation_and_basic_ops() {
     let base_layer: u64 = 0;
     let mut dag: Dag<TestData> = Dag::new(base_layer);
-
+
     // Test initial state
     assert_eq!(dag.height(), base_layer);
     assert_eq!(dag.base_layer(), base_layer);
-
+
     // Test vertex insertion
     let data = TestData { value: 1 };
     let parents = HashSet::new();
     let vertex = Vertex::from_data(data, 1, parents);
     let vertex_id = vertex.id().clone();
-
+
     dag.insert(vertex).unwrap();
     assert_eq!(dag.height(), 1);
     assert_eq!(dag.layer_size(1), 1);
-
+
     // Test vertex retrieval
     let retrieved = dag.get(&vertex_id).unwrap();
     assert_eq!(retrieved.data().value, 1);
@@ -45,31 +45,31 @@ async fn test_dag_creation_and_basic_ops() {
 #[tokio::test]
 async fn test_dag_parent_child_relationships() {
     let mut dag: Dag<TestData> = Dag::new(0);
-
+
     // Create parent vertex
     let parent_data = TestData { value: 1 };
     let parent_vertex = Vertex::from_data(parent_data, 1, HashSet::new());
     let parent_id = parent_vertex.id().clone();
     dag.insert(parent_vertex).unwrap();
-
+
     // Create child vertex with parent reference
     let mut parents = HashSet::new();
     parents.insert(parent_id);
     let child_data = TestData { value: 2 };
     let child_vertex = Vertex::from_data(child_data, 2, parents);
-
+
     dag.insert_checked(child_vertex).unwrap();
 }

 #[tokio::test]
 async fn test_dag_invalid_parent() {
     let mut dag: Dag<TestData> = Dag::new(0);
-
+
     let mut parents = HashSet::new();
     parents.insert("non_existent_parent".to_string());
     let data = TestData { value: 1 };
     let vertex = Vertex::from_data(data, 1, parents);
-
+
     match dag.insert_checked(vertex) {
         Err(DagError::MissingParents(_)) => (),
         _ => panic!("Expected MissingParents error"),
@@ -79,19 +79,19 @@ async fn test_dag_invalid_parent() {
 #[tokio::test]
 async fn test_dag_layer_operations() {
     let mut dag: Dag<TestData> = Dag::new(0);
-
+
     // Insert vertices in different layers
     for i in 1..=3 {
         let data = TestData { value: i };
         let vertex = Vertex::from_data(data, i as u64, HashSet::new());
         dag.insert(vertex).unwrap();
     }
-
+
     // Test layer queries
     assert_eq!(dag.layer_size(1), 1);
     assert_eq!(dag.layer_size(2), 1);
     assert_eq!(dag.layer_size(3), 1);
-
+
     let layer_2_vertices = dag.layer_vertices(2);
     assert_eq!(layer_2_vertices.len(), 1);
     assert_eq!(layer_2_vertices[0].data().value, 2);
 }

@@ -100,27 +100,27 @@ async fn test_dag_layer_operations() {
 #[tokio::test]
 async fn test_dag_multiple_parents() {
     let mut dag: Dag<TestData> = Dag::new(0);
-
+
     // Create two parent vertices
     let parent1_data = TestData { value: 1 };
     let parent2_data = TestData { value: 2 };
     let parent1_vertex = Vertex::from_data(parent1_data, 1, HashSet::new());
     let parent2_vertex = Vertex::from_data(parent2_data, 1, HashSet::new());
-
+
     let parent1_id = parent1_vertex.id().clone();
     let parent2_id = parent2_vertex.id().clone();
-
+
     dag.insert(parent1_vertex).unwrap();
     dag.insert(parent2_vertex).unwrap();
-
+
     // Create child with multiple parents
     let mut parents = HashSet::new();
     parents.insert(parent1_id);
     parents.insert(parent2_id);
-
+
     let child_data = TestData { value: 3 };
     let child_vertex = Vertex::from_data(child_data, 2, parents);
-
+
     dag.insert_checked(child_vertex).unwrap();
     assert_eq!(dag.layer_size(2), 1);
 }
@@ -128,60 +128,60 @@ async fn test_dag_multiple_parents() {
 #[tokio::test]
 async fn test_dag_cyclic_insertion_prevention() {
     let mut dag: Dag<TestData> = Dag::new(0);
-
+
     // Create first vertex
     let data1 = TestData { value: 1 };
     let vertex1 = Vertex::from_data(data1, 1, HashSet::new());
     let vertex1_id = vertex1.id().clone();
     dag.insert(vertex1).unwrap();
-
+
     // Try to create a vertex in a lower layer referencing a higher layer
     let mut parents = HashSet::new();
     parents.insert(vertex1_id);
     let data2 = TestData { value: 2 };
     let vertex2 = Vertex::from_data(data2, 0, parents);
-
+
     assert!(dag.insert_checked(vertex2).is_err());
 }

 #[tokio::test]
 async fn test_dag_complex_hierarchy() {
     let mut dag: Dag<TestData> = Dag::new(0);
-
+
     // Layer 1: Two vertices
     let vertex1_1 = Vertex::from_data(TestData { value: 11 }, 1, HashSet::new());
     let vertex1_2 = Vertex::from_data(TestData { value: 12 }, 1, HashSet::new());
     let id1_1 = vertex1_1.id().clone();
     let id1_2 = vertex1_2.id().clone();
-
+
     dag.insert(vertex1_1).unwrap();
     dag.insert(vertex1_2).unwrap();
-
+
     // Layer 2: Two vertices, each with one parent
     let mut parents2_1 = HashSet::new();
     parents2_1.insert(id1_1.clone());
     let mut parents2_2 = HashSet::new();
     parents2_2.insert(id1_2.clone());
-
+
     let vertex2_1 = Vertex::from_data(TestData { value: 21 }, 2, parents2_1);
     let vertex2_2 = Vertex::from_data(TestData { value: 22 }, 2, parents2_2);
     let id2_1 = vertex2_1.id().clone();
     let id2_2 = vertex2_2.id().clone();
-
+
     dag.insert_checked(vertex2_1).unwrap();
     dag.insert_checked(vertex2_2).unwrap();
-
+
     // Layer 3: One vertex with both layer 2 vertices as parents
     let mut parents3 = HashSet::new();
     parents3.insert(id2_1);
     parents3.insert(id2_2);
-
+
     let vertex3 = Vertex::from_data(TestData { value: 31 }, 3, parents3);
     dag.insert_checked(vertex3).unwrap();
-
+
     // Verify the structure
     assert_eq!(dag.layer_size(1), 2);
     assert_eq!(dag.layer_size(2), 2);
     assert_eq!(dag.layer_size(3), 1);
     assert_eq!(dag.height(), 3);
-}
\ No newline at end of file
+}

From 7e256d2b466f5fb41cea30c646352301bc74e4ee Mon Sep 17 00:00:00 2001
From: Harsh Pratap Singh
Date: Mon, 13 Jan 2025 14:12:08 +0530
Subject: [PATCH 13/15] nits
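
Drop the imports and the stray `mut` receiver bindings that the last
round of test changes left unused, and tighten the outer wait in
test_fetcher_timeout. The fetcher there is spawned with a deliberately
tiny per-fetch timeout, so the surrounding timeout only needs a small
margin above it rather than a full second:

    let result = tokio::time::timeout(tokio::time::Duration::from_millis(100), handle).await;
    assert!(result.is_ok(), "Fetcher should complete");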
---
 src/synchronizer/tests/fetcher_tests.rs | 12 +++++-------
 src/types/tests/dag_tests.rs            |  3 +--
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/src/synchronizer/tests/fetcher_tests.rs b/src/synchronizer/tests/fetcher_tests.rs
index 0959368..9ad94dd 100644
--- a/src/synchronizer/tests/fetcher_tests.rs
+++ b/src/synchronizer/tests/fetcher_tests.rs
@@ -1,5 +1,4 @@
-use rand::random;
-use std::{collections::HashSet, sync::Arc};
+use std::sync::Arc;
 use tokio::sync::{broadcast, mpsc};
 use tokio_util::sync::CancellationToken;

@@ -7,7 +6,7 @@ use crate::{
     network::Connect,
     synchronizer::{
         fetcher::Fetcher,
-        traits::{DataProvider, Fetch, IntoSyncRequest},
+        traits::{DataProvider, IntoSyncRequest},
         RequestedObject,
     },
     types::{
@@ -17,7 +16,6 @@ use crate::{
         },
         traits::{AsBytes, Hash, Random},
         transaction::Transaction,
-        Digest,
     },
 };

@@ -231,7 +229,7 @@
 #[tokio::test]
 async fn test_fetcher_timeout() {
     let (requests_tx, _) = mpsc::channel(100);
-    let (responses_tx, mut responses_rx) = broadcast::channel(100);
+    let (responses_tx, responses_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);

     let test_data = TestFetchable {
@@ -264,7 +262,7 @@ async fn test_fetcher_timeout() {
     drop(commands_tx);

     // Run fetcher with a longer timeout to ensure it has time to process
-    let result = tokio::time::timeout(tokio::time::Duration::from_millis(1000), handle).await;
+    let result = tokio::time::timeout(tokio::time::Duration::from_millis(100), handle).await;

     // The fetcher should complete successfully, but the fetch operation should have timed out
     assert!(result.is_ok(), "Fetcher should complete");
@@ -273,7 +271,7 @@
 #[tokio::test]
 async fn test_fetcher_error_response() {
     let (requests_tx, _) = mpsc::channel(100);
-    let (responses_tx, mut responses_rx) = broadcast::channel(100);
+    let (responses_tx, responses_rx) = broadcast::channel(100);
     let (commands_tx, commands_rx) = mpsc::channel(100);

     let test_data = TestFetchable {

diff --git a/src/types/tests/dag_tests.rs b/src/types/tests/dag_tests.rs
index e319734..4ba8676 100644
--- a/src/types/tests/dag_tests.rs
+++ b/src/types/tests/dag_tests.rs
@@ -2,8 +2,7 @@ use std::collections::HashSet;

 use crate::types::{
     dag::{Dag, DagError, Vertex},
-    traits::{AsBytes, Hash},
-    Digest,
+    traits::AsBytes,
 };

 #[derive(Clone, Debug)]

From 033664881bf02bf1e8df991c15bf4e293ce25ccd Mon Sep 17 00:00:00 2001
From: Harsh Pratap Singh
Date: Mon, 13 Jan 2025 15:13:57 +0530
Subject: [PATCH 14/15] flesh out the multiple round header builder test
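
Drive test_header_builder_multiple_rounds end to end instead of firing
the round trigger and sleeping. Each round now waits for the header
broadcast, answers it with enough votes to reach quorum, and verifies
the certificate that comes back on cert_rx. Condensed from the test
below (timeouts and the certificate checks elided):

    for round in 1..=3 {
        let mut certs = HashSet::new();
        certs.insert(Certificate::genesis([round as u8; 32]));
        header_trigger_tx.send((round, certs)).unwrap();

        // The builder should broadcast a header for this round ...
        if let NetworkRequest::BroadcastCounterparts(RequestPayload::Header(header)) =
            network_rx.recv().await.unwrap()
        {
            // ... which is answered with three votes, one per fresh keypair,
            // so wait_for_quorum can assemble a certificate.
            for _ in 0..3 {
                let vote = Vote::from_header(header.clone(), &Keypair::generate()).unwrap();
                votes_tx
                    .send(ReceivedObject {
                        object: vote,
                        sender: PeerId::random(),
                    })
                    .unwrap();
            }
        }
    }

Also document the two mock connectors in fetcher_tests.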
---
 src/primary/tests/header_tests.rs       | 89 ++++++++++++++++++++-----
 src/synchronizer/tests/fetcher_tests.rs |  3 +-
 2 files changed, 73 insertions(+), 19 deletions(-)

diff --git a/src/primary/tests/header_tests.rs b/src/primary/tests/header_tests.rs
index 3f4b990..aa4db62 100644
--- a/src/primary/tests/header_tests.rs
+++ b/src/primary/tests/header_tests.rs
@@ -1,29 +1,27 @@
-use std::{collections::HashSet, sync::Arc, time::Duration};
-
-use libp2p::{identity::ed25519::Keypair, PeerId};
-use tokio::{
-    sync::{broadcast, mpsc, watch},
-    time::sleep,
-};
-use tokio_util::sync::CancellationToken;
-
 use crate::{
     db::Db,
     primary::header_builder::{wait_for_quorum, HeaderBuilder},
     settings::parser::Committee,
     types::{
         batch::BatchId,
-        block_header::BlockHeader,
+        block_header::{BlockHeader, HeaderId},
         certificate::Certificate,
-        network::{NetworkRequest, ReceivedObject},
+        network::{NetworkRequest, ReceivedObject, RequestPayload},
         signing::Signable,
         sync::SyncStatus,
-        traits::AsBytes,
+        traits::{AsBytes, Hash},
         vote::Vote,
         Round,
     },
     utils::CircularBuffer,
 };
+use std::{collections::HashSet, sync::Arc, time::Duration};
+use libp2p::{identity::ed25519::Keypair, PeerId};
+use tokio::{
+    sync::{broadcast, mpsc, watch},
+    time::sleep,
+};
+use tokio_util::sync::CancellationToken;

 // test helper functions
 impl BlockHeader {
@@ -214,7 +212,7 @@ async fn test_header_builder_with_empty_digests() {

 #[tokio::test]
 async fn test_header_builder_multiple_rounds() {
-    let (network_tx, _) = mpsc::channel(100);
+    let (network_tx, mut network_rx) = mpsc::channel(100);
     let (certificate_tx, mut cert_rx) = mpsc::channel(100);
     let keypair = Keypair::generate();
     let db = Arc::new(Db::new_in_memory().await.unwrap());
@@ -228,24 +226,79 @@ async fn test_header_builder_multiple_rounds() {
         CancellationToken::new(),
         network_tx,
         certificate_tx,
-        keypair,
+        keypair.clone(),
         db,
         header_trigger_rx,
         votes_rx,
         digests_buffer,
-        committee,
+        committee.clone(),
         sync_status_rx,
     );

-    // trigger multiple rounds
+    // trigger multiple rounds and verify header building
     for round in 1..=3 {
+        // Create certificates for this round
         let mut certs = HashSet::new();
-        certs.insert(Certificate::genesis([round as u8; 32]));
+        let cert = Certificate::genesis([round as u8; 32]);
+        certs.insert(cert.clone());
+
+        // Trigger header building
         header_trigger_tx.send((round, certs)).unwrap();
-        sleep(Duration::from_millis(100)).await;
+
+        // Wait for header broadcast
+        let mut header_received = false;
+        while !header_received {
+            let network_request = tokio::time::timeout(
+                Duration::from_secs(5),
+                network_rx.recv()
+            ).await.expect("Timed out waiting for network request")
+                .expect("Network channel closed unexpectedly");
+
+            match network_request {
+                NetworkRequest::BroadcastCounterparts(RequestPayload::Header(header)) => {
+                    // Verify header round
+                    assert_eq!(header.round, round);
+
+                    // Create and send enough votes to reach quorum (3 votes needed)
+                    for _ in 0..3 {
+                        let voting_keypair = Keypair::generate(); // Different keypair for each vote
+                        let vote = Vote::from_header(header.clone(), &voting_keypair).unwrap();
+                        votes_tx.send(ReceivedObject {
+                            object: vote,
+                            sender: PeerId::random(),
+                        }).unwrap();
+                    }
+
+                    // Wait for certificate with timeout
+                    let certificate = tokio::time::timeout(
+                        Duration::from_secs(5),
+                        cert_rx.recv()
+                    ).await.expect("Timed out waiting for certificate")
+                        .expect("Certificate channel closed unexpectedly");
+
+                    // Verify certificate
+                    assert_eq!(certificate.round(), round);
+                    assert!(certificate.header().is_some(), "Certificate should have a header");
+                    let header_id: HeaderId = header.id().into();
+                    assert_eq!(certificate.header().unwrap(), header_id);
+
+                    header_received = true;
+                }
+                NetworkRequest::BroadcastCounterparts(RequestPayload::Certificate(_)) => {
+                    // Ignore certificate broadcasts, we verify them through the cert_rx channel
+                    continue;
+                }
+                _ => panic!("Unexpected network request: {:?}", network_request),
+            }
+        }
+
+        // Give some time for cleanup between rounds
+        sleep(Duration::from_millis(50)).await;
     }

+    // Clean shutdown
     handle.abort();
+    sleep(Duration::from_millis(50)).await;
 }

 #[tokio::test]

diff --git a/src/synchronizer/tests/fetcher_tests.rs b/src/synchronizer/tests/fetcher_tests.rs
index 9ad94dd..068d81d 100644
--- a/src/synchronizer/tests/fetcher_tests.rs
+++ b/src/synchronizer/tests/fetcher_tests.rs
@@ -22,6 +22,7 @@ use crate::{
 use async_trait::async_trait;
 use libp2p::PeerId;

+// Mock connector that allows testing network-related logic without real network dependencies
 #[derive(Clone)]
 struct MockConnector;

@@ -39,7 +40,7 @@ impl Connect for Arc<MockConnector> {
     }
 }

-// Create a mock connector that sleeps to simulate network delay
+// Mock connector that sleeps to simulate network delay
 #[derive(Clone)]
 struct SlowMockConnector;
From 3681ba74a93dfd40fe39561ac8fb914f2e6be547 Mon Sep 17 00:00:00 2001
From: Harsh Pratap Singh
Date: Mon, 13 Jan 2025 15:17:31 +0530
Subject: [PATCH 15/15] fmt nits

---
 src/primary/tests/header_tests.rs | 47 ++++++++++++++++---------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/src/primary/tests/header_tests.rs b/src/primary/tests/header_tests.rs
index aa4db62..72b467a 100644
--- a/src/primary/tests/header_tests.rs
+++ b/src/primary/tests/header_tests.rs
@@ -15,8 +15,8 @@ use crate::{
     },
     utils::CircularBuffer,
 };
-use std::{collections::HashSet, sync::Arc, time::Duration};
 use libp2p::{identity::ed25519::Keypair, PeerId};
+use std::{collections::HashSet, sync::Arc, time::Duration};
 use tokio::{
     sync::{broadcast, mpsc, watch},
     time::sleep,
@@ -241,47 +241,50 @@ async fn test_header_builder_multiple_rounds() {
         let mut certs = HashSet::new();
         let cert = Certificate::genesis([round as u8; 32]);
         certs.insert(cert.clone());
-
+
         // Trigger header building
         header_trigger_tx.send((round, certs)).unwrap();
-
+
         // Wait for header broadcast
         let mut header_received = false;
         while !header_received {
-            let network_request = tokio::time::timeout(
-                Duration::from_secs(5),
-                network_rx.recv()
-            ).await.expect("Timed out waiting for network request")
-                .expect("Network channel closed unexpectedly");
+            let network_request = tokio::time::timeout(Duration::from_secs(5), network_rx.recv())
+                .await
+                .expect("Timed out waiting for network request")
+                .expect("Network channel closed unexpectedly");

             match network_request {
                 NetworkRequest::BroadcastCounterparts(RequestPayload::Header(header)) => {
                     // Verify header round
                     assert_eq!(header.round, round);
-
+
                     // Create and send enough votes to reach quorum (3 votes needed)
                     for _ in 0..3 {
                         let voting_keypair = Keypair::generate(); // Different keypair for each vote
                         let vote = Vote::from_header(header.clone(), &voting_keypair).unwrap();
-                        votes_tx.send(ReceivedObject {
-                            object: vote,
-                            sender: PeerId::random(),
-                        }).unwrap();
+                        votes_tx
+                            .send(ReceivedObject {
+                                object: vote,
+                                sender: PeerId::random(),
+                            })
+                            .unwrap();
                     }
-
+
                     // Wait for certificate with timeout
-                    let certificate = tokio::time::timeout(
-                        Duration::from_secs(5),
-                        cert_rx.recv()
-                    ).await.expect("Timed out waiting for certificate")
-                        .expect("Certificate channel closed unexpectedly");
+                    let certificate = tokio::time::timeout(Duration::from_secs(5), cert_rx.recv())
+                        .await
+                        .expect("Timed out waiting for certificate")
+                        .expect("Certificate channel closed unexpectedly");

                     // Verify certificate
                     assert_eq!(certificate.round(), round);
-                    assert!(certificate.header().is_some(), "Certificate should have a header");
+                    assert!(
+                        certificate.header().is_some(),
+                        "Certificate should have a header"
+                    );
                     let header_id: HeaderId = header.id().into();
                     assert_eq!(certificate.header().unwrap(), header_id);
-
+
                     header_received = true;
                 }
                 NetworkRequest::BroadcastCounterparts(RequestPayload::Certificate(_)) => {
@@ -291,7 +294,7 @@ async fn test_header_builder_multiple_rounds() {
                 _ => panic!("Unexpected network request: {:?}", network_request),
             }
         }
-
+
         // Give some time for cleanup between rounds
         sleep(Duration::from_millis(50)).await;
     }