Add more range sync tests (#6872)
Currently we have very poor coverage of range sync with unit tests. With the event-driven test framework we can cover much more ground and be confident when modifying the code.


Add two basic cases:
- Happy path: complete a finalized sync for 2 epochs
- Post-PeerDAS case: start without enough custody peers and later find enough

⚠️ If you have ideas for more test cases, please let me know! I'll write them.
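For a rough idea of the intended shape of these event-driven cases, here is a minimal sketch built on the TestRig helpers that appear later in this diff (test_setup, new_connected_peer, expect_empty_network). The test name, the driving steps described in comments, and test_setup's visibility are assumptions, not the code added by this commit; the new tests themselves are in a changed file whose diff is not rendered on this page.

#[test]
fn range_finalized_sync_happy_path_sketch() {
    // Build the event-driven sync test rig (TestRig::test_setup is used the
    // same way in lookups.rs below).
    let mut rig = TestRig::test_setup();

    // Connect a peer; its ENR key is now derived deterministically from the
    // rig's RNG (see determinstic_key in this diff).
    let _peer = rig.new_connected_peer();

    // A real test would now feed the peer's status advertising a finalized
    // head two epochs ahead, answer the resulting blocks-by-range requests,
    // and forward completed batches to the beacon processor, checking the
    // range sync state after each event.

    // Finally, assert that no unexpected outbound requests remain queued.
    rig.expect_empty_network();
}
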
dapplion authored Feb 10, 2025
1 parent 62a0f25 commit d5a03c9
Showing 9 changed files with 223 additions and 33 deletions.
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion beacon_node/lighthouse_network/src/peer_manager/peerdb.rs
@@ -689,8 +689,8 @@ impl<E: EthSpec> PeerDB<E> {
         &mut self,
         supernode: bool,
         spec: &ChainSpec,
+        enr_key: CombinedKey,
     ) -> PeerId {
-        let enr_key = CombinedKey::generate_secp256k1();
         let mut enr = Enr::builder().build(&enr_key).unwrap();
         let peer_id = enr.peer_id();

2 changes: 2 additions & 0 deletions beacon_node/network/Cargo.toml
@@ -10,8 +10,10 @@ eth2 = { workspace = true }
 eth2_network_config = { workspace = true }
 genesis = { workspace = true }
 gossipsub = { workspace = true }
+k256 = "0.13.4"
 kzg = { workspace = true }
 matches = "0.1.8"
+rand_chacha = "0.3.1"
 serde_json = { workspace = true }
 slog-async = { workspace = true }
 slog-term = { workspace = true }
15 changes: 10 additions & 5 deletions beacon_node/network/src/sync/manager.rs
@@ -344,6 +344,16 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         self.range_sync.state()
     }
 
+    #[cfg(test)]
+    pub(crate) fn range_sync_state(&self) -> super::range_sync::SyncChainStatus {
+        self.range_sync.state()
+    }
+
+    #[cfg(test)]
+    pub(crate) fn __range_failed_chains(&mut self) -> Vec<Hash256> {
+        self.range_sync.__failed_chains()
+    }
+
     #[cfg(test)]
     pub(crate) fn get_failed_chains(&mut self) -> Vec<Hash256> {
         self.block_lookups.get_failed_chains()
@@ -368,11 +378,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         self.sampling.get_request_status(block_root, index)
     }
 
-    #[cfg(test)]
-    pub(crate) fn range_sync_state(&self) -> super::range_sync::SyncChainStatus {
-        self.range_sync.state()
-    }
-
     #[cfg(test)]
     pub(crate) fn update_execution_engine_state(&mut self, state: EngineState) {
         self.handle_new_execution_engine_state(state);
beacon_node/network/src/sync/range_sync/chain_collection.rs
@@ -477,7 +477,7 @@ impl<T: BeaconChainTypes> ChainCollection<T> {
             .find(|(_, chain)| chain.has_same_target(target_head_slot, target_head_root))
         {
             Some((&id, chain)) => {
-                debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, &chain);
+                debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, "id" => id);
                 debug_assert_eq!(chain.target_head_root, target_head_root);
                 debug_assert_eq!(chain.target_head_slot, target_head_slot);
                 if let Err(remove_reason) = chain.add_peer(network, peer) {
5 changes: 5 additions & 0 deletions beacon_node/network/src/sync/range_sync/range.rs
@@ -94,6 +94,11 @@
         }
     }
 
+    #[cfg(test)]
+    pub(crate) fn __failed_chains(&mut self) -> Vec<Hash256> {
+        self.failed_chains.keys().copied().collect()
+    }
+
     pub fn state(&self) -> SyncChainStatus {
         self.chains.state()
     }
27 changes: 17 additions & 10 deletions beacon_node/network/src/sync/tests/lookups.rs
@@ -27,6 +27,7 @@ use beacon_chain::{
     PayloadVerificationOutcome, PayloadVerificationStatus,
 };
 use beacon_processor::WorkEvent;
+use lighthouse_network::discovery::CombinedKey;
 use lighthouse_network::{
     rpc::{RPCError, RequestType, RpcErrorResponse},
     service::api_types::{
@@ -39,18 +40,16 @@
 use slog::info;
 use slot_clock::{SlotClock, TestingSlotClock};
 use tokio::sync::mpsc;
-use types::ForkContext;
 use types::{
     data_column_sidecar::ColumnIndex,
     test_utils::{SeedableRng, TestRandom, XorShiftRng},
-    BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, EthSpec, ForkName, Hash256,
-    MinimalEthSpec as E, SignedBeaconBlock, Slot,
+    BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, ForkName,
+    Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot,
 };
 
 const D: Duration = Duration::new(0, 0);
 const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS;
 const SAMPLING_REQUIRED_SUCCESSES: usize = 2;
-
 type DCByRootIds = Vec<DCByRootId>;
 type DCByRootId = (SyncRequestId, Vec<ColumnIndex>);

@@ -117,7 +116,9 @@ impl TestRig {
 
         let spec = chain.spec.clone();
 
-        let rng = XorShiftRng::from_seed([42; 16]);
+        // deterministic seed
+        let rng = ChaCha20Rng::from_seed([0u8; 32]);
+
         TestRig {
             beacon_processor_rx,
             beacon_processor_rx_queue: vec![],
@@ -154,7 +155,7 @@ impl TestRig {
         }
     }
 
-    fn test_setup_after_fulu() -> Option<Self> {
+    pub fn test_setup_after_fulu() -> Option<Self> {
         let r = Self::test_setup();
         if r.fork_name.fulu_enabled() {
             Some(r)
@@ -369,20 +370,26 @@ impl TestRig {
     }
 
     pub fn new_connected_peer(&mut self) -> PeerId {
+        let key = self.determinstic_key();
         self.network_globals
             .peers
             .write()
-            .__add_connected_peer_testing_only(false, &self.harness.spec)
+            .__add_connected_peer_testing_only(false, &self.harness.spec, key)
     }
 
     pub fn new_connected_supernode_peer(&mut self) -> PeerId {
+        let key = self.determinstic_key();
         self.network_globals
             .peers
             .write()
-            .__add_connected_peer_testing_only(true, &self.harness.spec)
+            .__add_connected_peer_testing_only(true, &self.harness.spec, key)
     }
 
+    fn determinstic_key(&mut self) -> CombinedKey {
+        k256::ecdsa::SigningKey::random(&mut self.rng).into()
+    }
+
-    fn new_connected_peers_for_peerdas(&mut self) {
+    pub fn new_connected_peers_for_peerdas(&mut self) {
         // Enough sampling peers with few columns
         for _ in 0..100 {
             self.new_connected_peer();
@@ -1113,7 +1120,7 @@ impl TestRig {
     }
 
     #[track_caller]
-    fn expect_empty_network(&mut self) {
+    pub fn expect_empty_network(&mut self) {
         self.drain_network_rx();
         if !self.network_rx_queue.is_empty() {
             let n = self.network_rx_queue.len();
5 changes: 3 additions & 2 deletions beacon_node/network/src/sync/tests/mod.rs
@@ -7,12 +7,13 @@ use beacon_chain::eth1_chain::CachingEth1Backend;
 use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
 use beacon_processor::WorkEvent;
 use lighthouse_network::NetworkGlobals;
+use rand_chacha::ChaCha20Rng;
 use slog::Logger;
 use slot_clock::ManualSlotClock;
 use std::sync::Arc;
 use store::MemoryStore;
 use tokio::sync::mpsc;
-use types::{test_utils::XorShiftRng, ChainSpec, ForkName, MinimalEthSpec as E};
+use types::{ChainSpec, ForkName, MinimalEthSpec as E};
 
 mod lookups;
 mod range;
@@ -61,7 +62,7 @@ struct TestRig {
     /// Beacon chain harness
     harness: BeaconChainHarness<EphemeralHarnessType<E>>,
     /// `rng` for generating test blocks and blobs.
-    rng: XorShiftRng,
+    rng: ChaCha20Rng,
     fork_name: ForkName,
     log: Logger,
     spec: Arc<ChainSpec>,
(Remaining file diffs were not loaded.)

0 comments on commit d5a03c9
