Skip to content

Commit

Permalink
Address review comments.
Browse files Browse the repository at this point in the history
  • Loading branch information
jimmygchen committed Jan 30, 2025
1 parent 8cb49c7 commit ce8090d
Show file tree
Hide file tree
Showing 4 changed files with 12 additions and 15 deletions.
4 changes: 2 additions & 2 deletions beacon_node/beacon_chain/src/beacon_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1281,8 +1281,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
if let Some(columns) = self.store.get_data_columns(block_root)? {
let num_required_columns = self.spec.number_of_columns / 2;
let blobs_available = columns.len() >= num_required_columns as usize;
if blobs_available {
let reconstruction_possible = columns.len() >= num_required_columns as usize;
if reconstruction_possible {
reconstruct_blobs(&self.kzg, &columns, None, &block, &self.spec)
.map(Some)
.map_err(Error::FailedToReconstructBlobs)
Expand Down
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/tests/store_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2300,7 +2300,7 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
let temp1 = tempdir().unwrap();
let full_store = get_store(&temp1);

// Run a supernode so the node has full blobs stored.
// TODO(das): Run a supernode so the node has full blobs stored.
// This may not be required in the future if we end up implementing downloading checkpoint
// blobs from p2p peers:
// https://github.com/sigp/lighthouse/issues/6837
Expand Down
4 changes: 2 additions & 2 deletions beacon_node/lighthouse_network/src/rpc/protocol.rs
Original file line number Diff line number Diff line change
Expand Up @@ -386,7 +386,7 @@ impl SupportedProtocol {
ProtocolId::new(Self::BlocksByRootV1, Encoding::SSZSnappy),
ProtocolId::new(Self::PingV1, Encoding::SSZSnappy),
];
if fork_context.fork_exists(ForkName::Fulu) {
if fork_context.spec.is_peer_das_scheduled() {
supported.extend_from_slice(&[
// V3 variants have higher preference for protocol negotiation
ProtocolId::new(Self::MetaDataV3, Encoding::SSZSnappy),
Expand All @@ -405,7 +405,7 @@ impl SupportedProtocol {
ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy),
]);
}
if fork_context.fork_exists(ForkName::Fulu) {
if fork_context.spec.is_peer_das_scheduled() {
supported.extend_from_slice(&[
ProtocolId::new(SupportedProtocol::DataColumnsByRootV1, Encoding::SSZSnappy),
ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy),
Expand Down
17 changes: 7 additions & 10 deletions beacon_node/network/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ use lighthouse_network::{
MessageId, NetworkEvent, NetworkGlobals, PeerId,
};
use slog::{crit, debug, error, info, o, trace, warn};
use std::borrow::Cow;
use std::collections::BTreeSet;
use std::{collections::HashSet, pin::Pin, sync::Arc, time::Duration};
use store::HotColdDB;
Expand All @@ -34,8 +33,8 @@ use task_executor::ShutdownReason;
use tokio::sync::mpsc;
use tokio::time::Sleep;
use types::{
ChainSpec, DataColumnSubnetId, EthSpec, ForkContext, ForkName, Slot, SubnetId,
SyncCommitteeSubscription, SyncSubnetId, Unsigned, ValidatorSubscription,
ChainSpec, DataColumnSubnetId, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription,
SyncSubnetId, Unsigned, ValidatorSubscription,
};

mod tests;
Expand Down Expand Up @@ -752,7 +751,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
}
}

if self.fork_context.fork_exists(ForkName::Fulu) {
if self.fork_context.spec.is_peer_das_scheduled() {
self.subscribe_to_peer_das_topics(&mut subscribed_topics);
}

Expand Down Expand Up @@ -813,13 +812,11 @@ impl<T: BeaconChainTypes> NetworkService<T> {
/// `network.subscribe_new_fork_topics()`.
fn subscribe_to_peer_das_topics(&mut self, subscribed_topics: &mut Vec<GossipTopic>) {
let column_subnets_to_subscribe = if self.subscribe_all_data_column_subnets {
Cow::Owned(
(0..self.fork_context.spec.data_column_sidecar_subnet_count)
.map(DataColumnSubnetId::new)
.collect(),
)
&(0..self.fork_context.spec.data_column_sidecar_subnet_count)
.map(DataColumnSubnetId::new)
.collect()
} else {
Cow::Borrowed(&self.network_globals.sampling_subnets)
&self.network_globals.sampling_subnets
};

for column_subnet in column_subnets_to_subscribe.iter() {
Expand Down

0 comments on commit ce8090d

Please sign in to comment.