Commit ddf7f9c

Merge remote-tracking branch 'origin/unstable' into tree-states-archive
michaelsproul committed Oct 28, 2024
2 parents 237de37 + 8188e03 commit ddf7f9c
Showing 62 changed files with 713 additions and 1,515 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test-suite.yml
@@ -420,7 +420,7 @@ jobs:
           channel: stable
           cache-target: release
       - name: Run Makefile to trigger the bash script
-        run: make cli
+        run: make cli-local
   # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether
   # a PR is safe to merge. New jobs should be added here.
   test-suite-success:
2 changes: 1 addition & 1 deletion Makefile
@@ -183,7 +183,7 @@ test-exec-engine:
 # test vectors.
 test: test-release
 
-# Updates the CLI help text pages in the Lighthouse book, building with Docker.
+# Updates the CLI help text pages in the Lighthouse book, building with Docker (primarily for Windows users).
 cli:
 	docker run --rm --user=root \
 		-v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \
34 changes: 6 additions & 28 deletions beacon_node/beacon_chain/src/block_verification.rs
@@ -55,8 +55,8 @@ use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlo
 use crate::data_column_verification::GossipDataColumnError;
 use crate::eth1_finalization_cache::Eth1FinalizationData;
 use crate::execution_payload::{
-    is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
-    AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
+    validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport,
+    NotifyExecutionLayer, PayloadNotifier,
 };
 use crate::kzg_utils::blobs_to_data_column_sidecars;
 use crate::observed_block_producers::SeenBlock;
@@ -74,7 +74,7 @@ use lighthouse_metrics::TryExt;
 use parking_lot::RwLockReadGuard;
 use proto_array::Block as ProtoBlock;
 use safe_arith::ArithError;
-use slog::{debug, error, warn, Logger};
+use slog::{debug, error, Logger};
 use slot_clock::SlotClock;
 use ssz::Encode;
 use ssz_derive::{Decode, Encode};
@@ -95,9 +95,9 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
 use task_executor::JoinHandle;
 use types::{
     data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError,
-    BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecPayload, ExecutionBlockHash,
-    FullPayload, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch,
-    SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
+    BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload,
+    Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock,
+    SignedBeaconBlockHeader, Slot,
 };
 
 pub const POS_PANDA_BANNER: &str = r#"
@@ -1382,28 +1382,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             }
             let payload_verification_status = payload_notifier.notify_new_payload().await?;
 
-            // If the payload did not validate or invalidate the block, check to see if this block is
-            // valid for optimistic import.
-            if payload_verification_status.is_optimistic() {
-                let block_hash_opt = block
-                    .message()
-                    .body()
-                    .execution_payload()
-                    .map(|full_payload| full_payload.block_hash());
-
-                // Ensure the block is a candidate for optimistic import.
-                if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await?
-                {
-                    warn!(
-                        chain.log,
-                        "Rejecting optimistic block";
-                        "block_hash" => ?block_hash_opt,
-                        "msg" => "the execution engine is not synced"
-                    );
-                    return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into());
-                }
-            }
-
             Ok(PayloadVerificationOutcome {
                 payload_verification_status,
                 is_valid_merge_transition_block,
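For context on the hunk above: the deleted guard keyed off `payload_verification_status.is_optimistic()` and only rejected the block when its parent was not an optimistic-import candidate. The snippet below is an illustrative model of such a status enum, not the Lighthouse definition; the variant names beyond `Optimistic` are assumptions.

// Illustrative sketch only; the real `PayloadVerificationStatus` lives in the
// Lighthouse codebase and may differ. Variant names other than `Optimistic` are assumed.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PayloadVerificationStatus {
    /// The execution engine reported the payload as valid.
    Verified,
    /// The engine could not verify the payload yet (e.g. it is still syncing).
    Optimistic,
    /// The block carries no payload to verify.
    Irrelevant,
}

impl PayloadVerificationStatus {
    /// Mirrors the `is_optimistic()` call used by the guard removed above.
    fn is_optimistic(&self) -> bool {
        matches!(self, PayloadVerificationStatus::Optimistic)
    }
}

fn main() {
    let status = PayloadVerificationStatus::Optimistic;
    // The deleted code only rejected such blocks when the parent was not an
    // optimistic-import candidate; other statuses passed straight through.
    println!("optimistic: {}", status.is_optimistic());
}
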
34 changes: 1 addition & 33 deletions beacon_node/beacon_chain/src/execution_payload.rs
@@ -277,9 +277,7 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>(
             }
             .into()),
             None => {
-                if allow_optimistic_import == AllowOptimisticImport::Yes
-                    && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await?
-                {
+                if allow_optimistic_import == AllowOptimisticImport::Yes {
                     debug!(
                         chain.log,
                         "Optimistically importing merge transition block";
@@ -297,36 +295,6 @@
     }
 }
 
-/// Check to see if a block with the given parameters is valid to be imported optimistically.
-pub async fn is_optimistic_candidate_block<T: BeaconChainTypes>(
-    chain: &Arc<BeaconChain<T>>,
-    block_slot: Slot,
-    block_parent_root: Hash256,
-) -> Result<bool, BeaconChainError> {
-    let current_slot = chain.slot()?;
-    let inner_chain = chain.clone();
-
-    // Use a blocking task to check if the block is an optimistic candidate. Interacting
-    // with the `fork_choice` lock in an async task can block the core executor.
-    chain
-        .spawn_blocking_handle(
-            move || {
-                inner_chain
-                    .canonical_head
-                    .fork_choice_read_lock()
-                    .is_optimistic_candidate_block(
-                        current_slot,
-                        block_slot,
-                        &block_parent_root,
-                        &inner_chain.spec,
-                    )
-            },
-            "validate_merge_block_optimistic_candidate",
-        )
-        .await?
-        .map_err(BeaconChainError::from)
-}
-
 /// Validate the gossip block's execution_payload according to the checks described here:
 /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block
 pub fn validate_execution_payload_for_gossip<T: BeaconChainTypes>(
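The comment in the deleted helper explains its design: the fork-choice lookup is wrapped in `spawn_blocking_handle` because taking the `fork_choice` lock directly inside an async task can stall the core executor. Below is a self-contained sketch of that general blocking-task pattern using plain `tokio`; it is illustrative only and does not reproduce the Lighthouse `spawn_blocking_handle` wrapper.

// Sketch of the blocking-task pattern, not Lighthouse code. Requires the
// `tokio` crate with the "rt-multi-thread" and "macros" features.
use std::sync::{Arc, RwLock};

#[tokio::main]
async fn main() {
    // Stand-in for fork-choice state guarded by a synchronous lock.
    let fork_choice = Arc::new(RwLock::new(42u64));

    let fc = fork_choice.clone();
    // Move the lock-holding work onto the blocking thread pool so that a slow
    // or contended read cannot block other futures on the async executor.
    let is_candidate = tokio::task::spawn_blocking(move || {
        let head_slot = fc.read().expect("lock poisoned");
        *head_slot < 100 // stand-in for the real optimistic-candidate check
    })
    .await
    .expect("blocking task panicked");

    println!("optimistic candidate: {is_candidate}");
}
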
46 changes: 17 additions & 29 deletions beacon_node/beacon_chain/src/light_client_server_cache.rs
@@ -1,25 +1,19 @@
 use crate::errors::BeaconChainError;
 use crate::{metrics, BeaconChainTypes, BeaconStore};
-use eth2::types::light_client_update::CurrentSyncCommitteeProofLen;
 use parking_lot::{Mutex, RwLock};
 use safe_arith::SafeArith;
 use slog::{debug, Logger};
 use ssz::Decode;
-use ssz_types::FixedVector;
 use std::num::NonZeroUsize;
 use std::sync::Arc;
 use store::DBColumn;
 use store::KeyValueStore;
 use tree_hash::TreeHash;
-use types::light_client_update::{
-    FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX,
-    FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX,
-};
 use types::non_zero_usize::new_non_zero_usize;
 use types::{
     BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256,
     LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate,
-    LightClientUpdate, Slot, SyncAggregate, SyncCommittee,
+    LightClientUpdate, MerkleProof, Slot, SyncAggregate, SyncCommittee,
 };
 
 /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the
@@ -69,17 +63,14 @@ impl<T: BeaconChainTypes> LightClientServerCache<T> {
         block_post_state: &mut BeaconState<T::EthSpec>,
     ) -> Result<(), BeaconChainError> {
         let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES);
 
+        let fork_name = spec.fork_name_at_slot::<T::EthSpec>(block.slot());
-        // Only post-altair
-        if spec.fork_name_at_slot::<T::EthSpec>(block.slot()) == ForkName::Base {
-            return Ok(());
+        if fork_name.altair_enabled() {
+            // Persist in memory cache for a descendent block
+            let cached_data = LightClientCachedData::from_state(block_post_state)?;
+            self.prev_block_cache.lock().put(block_root, cached_data);
         }
 
-        // Persist in memory cache for a descendent block
-
-        let cached_data = LightClientCachedData::from_state(block_post_state)?;
-        self.prev_block_cache.lock().put(block_root, cached_data);
-
         Ok(())
     }
 
@@ -413,34 +404,31 @@ impl<T: BeaconChainTypes> Default for LightClientServerCache<T> {
     }
 }
 
-type FinalityBranch = FixedVector<Hash256, FinalizedRootProofLen>;
-type NextSyncCommitteeBranch = FixedVector<Hash256, NextSyncCommitteeProofLen>;
-type CurrentSyncCommitteeBranch = FixedVector<Hash256, CurrentSyncCommitteeProofLen>;
-
 #[derive(Clone)]
 struct LightClientCachedData<E: EthSpec> {
     finalized_checkpoint: Checkpoint,
-    finality_branch: FinalityBranch,
-    next_sync_committee_branch: NextSyncCommitteeBranch,
-    current_sync_committee_branch: CurrentSyncCommitteeBranch,
+    finality_branch: MerkleProof,
+    next_sync_committee_branch: MerkleProof,
+    current_sync_committee_branch: MerkleProof,
     next_sync_committee: Arc<SyncCommittee<E>>,
     current_sync_committee: Arc<SyncCommittee<E>>,
     finalized_block_root: Hash256,
 }
 
 impl<E: EthSpec> LightClientCachedData<E> {
     fn from_state(state: &mut BeaconState<E>) -> Result<Self, BeaconChainError> {
+        let (finality_branch, next_sync_committee_branch, current_sync_committee_branch) = (
+            state.compute_finalized_root_proof()?,
+            state.compute_current_sync_committee_proof()?,
+            state.compute_next_sync_committee_proof()?,
+        );
         Ok(Self {
             finalized_checkpoint: state.finalized_checkpoint(),
-            finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(),
+            finality_branch,
             next_sync_committee: state.next_sync_committee()?.clone(),
             current_sync_committee: state.current_sync_committee()?.clone(),
-            next_sync_committee_branch: state
-                .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?
-                .into(),
-            current_sync_committee_branch: state
-                .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?
-                .into(),
+            next_sync_committee_branch,
+            current_sync_committee_branch,
             finalized_block_root: state.finalized_checkpoint().root,
         })
     }
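The final hunk replaces the three fixed-length `FixedVector` branch aliases with the variable-length `MerkleProof` type, so a single cached struct can carry proofs whose depth differs between forks. The standalone sketch below illustrates that design choice; the `MerkleProof = Vec<Hash256>` shape and the example depths are assumptions for illustration, not taken from this diff.

// Illustrative only: why a variable-length proof type is more flexible than
// per-constant FixedVector lengths once proof depth can change across forks.
type Hash256 = [u8; 32];
type MerkleProof = Vec<Hash256>; // assumed shape of `types::MerkleProof`

#[allow(dead_code)]
struct LightClientCachedData {
    finality_branch: MerkleProof,
    next_sync_committee_branch: MerkleProof,
    current_sync_committee_branch: MerkleProof,
}

fn main() {
    // Hypothetical depths: a later fork can deepen a proof without touching the
    // struct definition or any SSZ length constant.
    let cached = LightClientCachedData {
        finality_branch: vec![[0u8; 32]; 6],
        next_sync_committee_branch: vec![[0u8; 32]; 5],
        current_sync_committee_branch: vec![[0u8; 32]; 5],
    };
    println!("finality proof depth = {}", cached.finality_branch.len());
}
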
[Diffs for the remaining 57 changed files are not shown here.]
