Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/stable' into back-merge-6.0.1
Browse files Browse the repository at this point in the history
  • Loading branch information
michaelsproul committed Dec 16, 2024
2 parents c92c07f + 0d90135 commit a6de0d5
Show file tree
Hide file tree
Showing 17 changed files with 126 additions and 33 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/docker.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@ concurrency:
cancel-in-progress: true

env:
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DH_KEY }}
DOCKER_USERNAME: ${{ secrets.DH_ORG }}
# Enable self-hosted runners for the sigp repo only.
SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }}

Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@ concurrency:
cancel-in-progress: true

env:
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DH_KEY }}
DOCKER_USERNAME: ${{ secrets.DH_ORG }}
REPO_NAME: ${{ github.repository_owner }}/lighthouse
IMAGE_NAME: ${{ github.repository_owner }}/lighthouse
# Enable self-hosted runners for the sigp repo only.
Expand Down
8 changes: 4 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion beacon_node/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "6.0.0"
version = "6.0.1"
authors = [
"Paul Hauner <[email protected]>",
"Age Manning <[email protected]>",
Expand Down
4 changes: 3 additions & 1 deletion beacon_node/beacon_chain/src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1037,7 +1037,9 @@ where
);

// Check for states to reconstruct (in the background).
if beacon_chain.config.reconstruct_historic_states {
if beacon_chain.config.reconstruct_historic_states
&& beacon_chain.store.get_oldest_block_slot() == 0
{
beacon_chain.store_migrator.process_reconstruction();
}

Expand Down
6 changes: 4 additions & 2 deletions beacon_node/beacon_chain/src/migrate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,10 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200;
const COMPACTION_FINALITY_DISTANCE: u64 = 1024;
/// Maximum number of blocks applied in each reconstruction burst.
///
/// This limits the amount of time that the finalization migration is paused for.
const BLOCKS_PER_RECONSTRUCTION: usize = 8192 * 4;
/// This limits the amount of time that the finalization migration is paused for. We set this
/// conservatively because pausing the finalization migration for too long can cause hot state
/// cache misses and excessive disk use.
const BLOCKS_PER_RECONSTRUCTION: usize = 1024;

/// Default number of epochs to wait between finalization migrations.
pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ pub fn delete_old_schema_freezer_data<T: BeaconChainTypes>(
db.cold_db.do_atomically(cold_ops)?;

// In order to reclaim space, we need to compact the freezer DB as well.
db.cold_db.compact()?;
db.compact_freezer()?;

Ok(())
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -141,10 +141,6 @@ impl<E: EthSpec> NetworkBehaviour for PeerManager<E> {
debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %ClearDialError(error));
self.on_dial_failure(peer_id);
}
FromSwarm::ExternalAddrConfirmed(_) => {
// We have an external address confirmed, means we are able to do NAT traversal.
metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1);
}
_ => {
// NOTE: FromSwarm is a non exhaustive enum so updates should be based on release
// notes more than compiler feedback
Expand Down
9 changes: 6 additions & 3 deletions beacon_node/network/src/subnet_service/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ pub struct SubnetService<T: BeaconChainTypes> {
subscriptions: HashSetDelay<Subnet>,

/// Subscriptions that need to be executed in the future.
scheduled_subscriptions: HashSetDelay<Subnet>,
scheduled_subscriptions: HashSetDelay<ExactSubnet>,

/// A list of permanent subnets that this node is subscribed to.
// TODO: Shift this to a dynamic bitfield
Expand Down Expand Up @@ -484,8 +484,10 @@ impl<T: BeaconChainTypes> SubnetService<T> {
self.subscribe_to_subnet_immediately(subnet, slot + 1)?;
} else {
// This is a future slot, schedule subscribing.
// We need to include the slot to make the key unique to prevent overwriting the entry
// for the same subnet.
self.scheduled_subscriptions
.insert_at(subnet, time_to_subscription_start);
.insert_at(ExactSubnet { subnet, slot }, time_to_subscription_start);
}

Ok(())
Expand Down Expand Up @@ -626,7 +628,8 @@ impl<T: BeaconChainTypes> Stream for SubnetService<T> {
// Process scheduled subscriptions that might be ready, since those can extend a soon to
// expire subscription.
match self.scheduled_subscriptions.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(subnet))) => {
Poll::Ready(Some(Ok(exact_subnet))) => {
let ExactSubnet { subnet, .. } = exact_subnet;
let current_slot = self.beacon_chain.slot_clock.now().unwrap_or_default();
if let Err(e) = self.subscribe_to_subnet_immediately(subnet, current_slot + 1) {
debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e);
Expand Down
55 changes: 51 additions & 4 deletions beacon_node/network/src/subnet_service/tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -500,12 +500,15 @@ mod test {
// subscription config
let committee_count = 1;

// Makes 2 validator subscriptions to the same subnet but at different slots.
// There should be just 1 unsubscription event for the later slot subscription (subscription_slot2).
// Makes 3 validator subscriptions to the same subnet but at different slots.
// There should be just 1 unsubscription event for each of the later slots subscriptions
// (subscription_slot2 and subscription_slot3).
let subscription_slot1 = 0;
let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4;
let subscription_slot3 = subscription_slot2 * 2;
let com1 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4;
let com2 = 0;
let com3 = CHAIN.chain.spec.attestation_subnet_count - com1;

// create the attestation service and subscriptions
let mut subnet_service = get_subnet_service();
Expand All @@ -532,6 +535,13 @@ mod test {
true,
);

let sub3 = get_subscription(
com3,
current_slot + Slot::new(subscription_slot3),
committee_count,
true,
);

let subnet_id1 = SubnetId::compute_subnet::<MainnetEthSpec>(
current_slot + Slot::new(subscription_slot1),
com1,
Expand All @@ -548,12 +558,23 @@ mod test {
)
.unwrap();

let subnet_id3 = SubnetId::compute_subnet::<MainnetEthSpec>(
current_slot + Slot::new(subscription_slot3),
com3,
committee_count,
&subnet_service.beacon_chain.spec,
)
.unwrap();

// Assert that subscriptions are different but their subnet is the same
assert_ne!(sub1, sub2);
assert_ne!(sub1, sub3);
assert_ne!(sub2, sub3);
assert_eq!(subnet_id1, subnet_id2);
assert_eq!(subnet_id1, subnet_id3);

// submit the subscriptions
subnet_service.validator_subscriptions(vec![sub1, sub2].into_iter());
subnet_service.validator_subscriptions(vec![sub1, sub2, sub3].into_iter());

// Unsubscription event should happen at the end of the slot.
// We wait for 2 slots, to avoid timeout issues
Expand Down Expand Up @@ -590,10 +611,36 @@ mod test {
// If the permanent and short lived subnets are different, we should get an unsubscription event.
if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) {
assert_eq!(
[expected_subscription, expected_unsubscription],
[
expected_subscription.clone(),
expected_unsubscription.clone(),
],
second_subscribe_event[..]
);
}

let subscription_slot = current_slot + subscription_slot3 - 1;

let wait_slots = subnet_service
.beacon_chain
.slot_clock
.duration_to_slot(subscription_slot)
.unwrap()
.as_millis() as u64
/ SLOT_DURATION_MILLIS;

let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await);

assert_eq!(no_events, []);

let third_subscribe_event = get_events(&mut subnet_service, None, 2).await;

if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) {
assert_eq!(
[expected_subscription, expected_unsubscription],
third_subscribe_event[..]
);
}
}

#[tokio::test]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,10 @@ impl<T: BeaconChainTypes> SingleBlockLookup<T> {
self.awaiting_parent.is_some()
|| self.block_request_state.state.is_awaiting_event()
|| match &self.component_requests {
ComponentRequests::WaitingForBlock => true,
// If components are waiting for the block request to complete, here we should
// check if `block_request_state.state.is_awaiting_event()`. However we already
// checked that above, so `WaitingForBlock => false` is equivalent.
ComponentRequests::WaitingForBlock => false,
ComponentRequests::ActiveBlobRequest(request, _) => {
request.state.is_awaiting_event()
}
Expand Down
42 changes: 41 additions & 1 deletion beacon_node/store/src/hot_cold_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2484,6 +2484,45 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
Ok(())
}

/// Run a compaction pass on the freezer DB to free up space used by deleted states.
pub fn compact_freezer(&self) -> Result<(), Error> {
let current_schema_columns = vec![
DBColumn::BeaconColdStateSummary,
DBColumn::BeaconStateSnapshot,
DBColumn::BeaconStateDiff,
DBColumn::BeaconStateRoots,
];

// We can remove this once schema V21 has been gone for a while.
let previous_schema_columns = vec![
DBColumn::BeaconState,
DBColumn::BeaconStateSummary,
DBColumn::BeaconBlockRootsChunked,
DBColumn::BeaconStateRootsChunked,
DBColumn::BeaconRestorePoint,
DBColumn::BeaconHistoricalRoots,
DBColumn::BeaconRandaoMixes,
DBColumn::BeaconHistoricalSummaries,
];
let mut columns = current_schema_columns;
columns.extend(previous_schema_columns);

for column in columns {
info!(
self.log,
"Starting compaction";
"column" => ?column
);
self.cold_db.compact_column(column)?;
info!(
self.log,
"Finishing compaction";
"column" => ?column
);
}
Ok(())
}

/// Return `true` if compaction on finalization/pruning is enabled.
pub fn compact_on_prune(&self) -> bool {
self.config.compact_on_prune
Expand Down Expand Up @@ -2875,6 +2914,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
//
// We can remove this once schema V21 has been gone for a while.
let previous_schema_columns = vec![
DBColumn::BeaconState,
DBColumn::BeaconStateSummary,
DBColumn::BeaconBlockRootsChunked,
DBColumn::BeaconStateRootsChunked,
Expand Down Expand Up @@ -2916,7 +2956,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.cold_db.do_atomically(cold_ops)?;

// In order to reclaim space, we need to compact the freezer DB as well.
self.cold_db.compact()?;
self.compact_freezer()?;

Ok(())
}
Expand Down
2 changes: 1 addition & 1 deletion boot_node/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "6.0.0"
version = "6.0.1"
authors = ["Sigma Prime <[email protected]>"]
edition = { workspace = true }

Expand Down
4 changes: 2 additions & 2 deletions common/lighthouse_version/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
// NOTE: using --match instead of --exclude for compatibility with old Git
"--match=thiswillnevermatchlol"
],
prefix = "Lighthouse/v6.0.0-",
fallback = "Lighthouse/v6.0.0"
prefix = "Lighthouse/v6.0.1-",
fallback = "Lighthouse/v6.0.1"
);

/// Returns the first eight characters of the latest commit hash for this build.
Expand Down
4 changes: 2 additions & 2 deletions common/system_health/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -235,14 +235,14 @@ pub fn observe_nat() -> NatState {

let libp2p_ipv4 = lighthouse_network::metrics::get_int_gauge(
&lighthouse_network::metrics::NAT_OPEN,
&["libp2p"],
&["libp2p_ipv4"],
)
.map(|g| g.get() == 1)
.unwrap_or_default();

let libp2p_ipv6 = lighthouse_network::metrics::get_int_gauge(
&lighthouse_network::metrics::NAT_OPEN,
&["libp2p"],
&["libp2p_ipv6"],
)
.map(|g| g.get() == 1)
.unwrap_or_default();
Expand Down
2 changes: 1 addition & 1 deletion lcli/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "6.0.0"
version = "6.0.1"
authors = ["Paul Hauner <[email protected]>"]
edition = { workspace = true }

Expand Down
2 changes: 1 addition & 1 deletion lighthouse/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "6.0.0"
version = "6.0.1"
authors = ["Sigma Prime <[email protected]>"]
edition = { workspace = true }
autotests = false
Expand Down

0 comments on commit a6de0d5

Please sign in to comment.