chore: add lint to p2p_sync #1841

Merged · 1 commit · Nov 7, 2024

1 change: 1 addition & 0 deletions crates/papyrus_p2p_sync/src/client/header.rs
```diff
@@ -14,6 +14,7 @@ use super::stream_builder::{BlockData, BlockNumberLimit, DataStreamBuilder};
 use super::{P2PSyncClientError, ALLOWED_SIGNATURES_LENGTH, NETWORK_DATA_TIMEOUT};
 
 impl BlockData for SignedBlockHeader {
+    #[allow(clippy::as_conversions)] // FIXME: use int metrics so `as f64` may be removed.
     fn write_to_storage(
         self: Box<Self>,
         storage_writer: &mut StorageWriter,
```
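For context on the lint this PR enables: `clippy::as_conversions` flags every `as` cast because `as` silently wraps or truncates on overflow, while `TryFrom` makes the fallibility explicit. A minimal standalone sketch of the difference (not from this repo):

```rust
// Minimal sketch, not from this repo: why `as` casts are worth linting.
fn main() {
    let big: u64 = u64::MAX;

    // `as` compiles silently and truncates to the low 32 bits.
    #[allow(clippy::as_conversions)]
    let lossy = big as u32; // 4294967295 here; a silent wrap in general
    println!("as:       {lossy}");

    // `try_from` surfaces the lossiness as a Result instead.
    let checked = u32::try_from(big);
    println!("try_from: {checked:?}"); // Err(TryFromIntError(()))
}
```
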
1 change: 1 addition & 0 deletions crates/papyrus_p2p_sync/src/client/state_diff.rs
```diff
@@ -18,6 +18,7 @@ use crate::client::{P2PSyncClientError, NETWORK_DATA_TIMEOUT};
 
 impl BlockData for (ThinStateDiff, BlockNumber) {
     #[latency_histogram("p2p_sync_state_diff_write_to_storage_latency_seconds", true)]
+    #[allow(clippy::as_conversions)] // FIXME: use int metrics so `as f64` may be removed.
     fn write_to_storage(
         self: Box<Self>,
         storage_writer: &mut StorageWriter,
```
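Both `allow`s carry the same FIXME: the cast exists only because the metrics recorded in these functions are float-typed. A hypothetical sketch of the trade-off (the metric shapes are illustrative, not this repo's metrics API):

```rust
// Hypothetical sketch, not this repo's metrics API: a float-typed metric
// forces `as f64` on a u64 value; an integer metric would not.
fn main() {
    let block_number: u64 = 1_000_000;

    // Float histogram/gauge style: needs the cast the lint complains about.
    #[allow(clippy::as_conversions)]
    let sample = block_number as f64;
    println!("f64 sample: {sample}");

    // Integer counter style: consumes the u64 directly, no cast, no allow.
    let sample: u64 = block_number;
    println!("u64 sample: {sample}");
}
```
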
83 changes: 42 additions & 41 deletions crates/papyrus_p2p_sync/src/server/test.rs
```diff
@@ -53,10 +53,10 @@ use starknet_api::transaction::{
 use super::{split_thin_state_diff, FetchBlockDataFromDb, P2PSyncServer, P2PSyncServerChannels};
 use crate::server::register_query;
 const BUFFER_SIZE: usize = 10;
-const NUM_OF_BLOCKS: u64 = 10;
+const NUM_OF_BLOCKS: usize = 10;
 const NUM_TXS_PER_BLOCK: usize = 5;
 const EVENTS_PER_TX: usize = 2;
-const BLOCKS_DELTA: u64 = 5;
+const BLOCKS_DELTA: usize = 5;
 
 enum StartBlockType {
     Hash,
```
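Retyping these constants from `u64` to `usize` is what lets the rest of the file drop its `as usize` casts: the constants are used mostly as `Vec` indices, and Rust indexing takes `usize`. A minimal sketch, simplified from the test setup:

```rust
// Minimal sketch, simplified from the test setup: usize constants index Vecs
// directly, while u64 constants would need a conversion at every use site.
fn main() {
    const NUM_OF_BLOCKS: usize = 10;
    const BLOCKS_DELTA: usize = 5;

    let tx_hashes: Vec<String> = (0..NUM_OF_BLOCKS).map(|i| format!("hash-{i}")).collect();

    // Indexing takes usize; with `const NUM_OF_BLOCKS: u64` this would read
    // `tx_hashes[(NUM_OF_BLOCKS - BLOCKS_DELTA) as usize]`.
    let first_present = &tx_hashes[NUM_OF_BLOCKS - BLOCKS_DELTA];
    println!("{first_present}");
}
```
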
```diff
@@ -68,11 +68,11 @@ enum StartBlockType {
 async fn header_query_positive_flow() {
     let assert_signed_block_header = |data: Vec<SignedBlockHeader>| {
         let len = data.len();
-        assert!(len == NUM_OF_BLOCKS as usize);
+        assert_eq!(len, NUM_OF_BLOCKS);
         for (i, signed_header) in data.into_iter().enumerate() {
             assert_eq!(
                 signed_header.block_header.block_header_without_hash.block_number.0,
-                i as u64
+                u64::try_from(i).unwrap()
             );
         }
     };
```
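Two small improvements appear in this hunk and recur below: `assert_eq!` replaces `assert!(a == b)` so a failure reports both operands, and `u64::try_from(i).unwrap()` replaces `i as u64`. A quick illustration of the first point (generic Rust, not from the PR):

```rust
// Generic Rust aside, not from the PR: on failure, assert_eq! prints both
// operands ("left: 9, right: 10"), while assert!(a == b) only echoes the
// expression text. That better failure output motivates the switch.
fn main() {
    let len = 10;
    assert_eq!(len, 10);
    println!("assertion passed");
}
```
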
```diff
@@ -85,7 +85,7 @@ async fn header_query_positive_flow() {
 async fn transaction_query_positive_flow() {
     let assert_transaction_and_output = |data: Vec<FullTransaction>| {
         let len = data.len();
-        assert_eq!(len, NUM_OF_BLOCKS as usize * NUM_TXS_PER_BLOCK);
+        assert_eq!(len, NUM_OF_BLOCKS * NUM_TXS_PER_BLOCK);
         for (i, FullTransaction { transaction, transaction_output, transaction_hash }) in
             data.into_iter().enumerate()
         {
```
```diff
@@ -120,7 +120,7 @@ async fn state_diff_query_positive_flow() {
 #[tokio::test]
 async fn event_query_positive_flow() {
     let assert_event = |data: Vec<(Event, TransactionHash)>| {
-        assert_eq!(data.len(), NUM_OF_BLOCKS as usize * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
+        assert_eq!(data.len(), NUM_OF_BLOCKS * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
         for (i, (event, tx_hash)) in data.into_iter().enumerate() {
             assert_eq!(
                 tx_hash,
```
```diff
@@ -173,11 +173,11 @@ async fn class_query_positive_flow() {
 async fn header_query_some_blocks_are_missing() {
     let assert_signed_block_header = |data: Vec<SignedBlockHeader>| {
         let len = data.len();
-        assert!(len == BLOCKS_DELTA as usize);
+        assert!(len == BLOCKS_DELTA);
         for (i, signed_header) in data.into_iter().enumerate() {
             assert_eq!(
                 signed_header.block_header.block_header_without_hash.block_number.0,
-                i as u64 + NUM_OF_BLOCKS - BLOCKS_DELTA
+                u64::try_from(i + NUM_OF_BLOCKS - BLOCKS_DELTA).unwrap()
            );
         }
     };
```
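Note the shape of the replacement: the old code cast `i` up and did the arithmetic in `u64`, while the new code does the arithmetic in `usize` and converts once at the boundary where a `u64` block number is expected. A sketch of that pattern (constants as above, the rest assumed):

```rust
// Sketch of the convert-once pattern: index arithmetic stays in usize and a
// single checked conversion happens where the u64 block number is needed.
fn main() {
    const NUM_OF_BLOCKS: usize = 10;
    const BLOCKS_DELTA: usize = 5;

    for i in 0..BLOCKS_DELTA {
        // unwrap() is acceptable in tests; the sum always fits in u64 here.
        let expected_block_number = u64::try_from(i + NUM_OF_BLOCKS - BLOCKS_DELTA).unwrap();
        println!("expected block number: {expected_block_number}");
    }
}
```
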
```diff
@@ -194,23 +194,22 @@ async fn header_query_some_blocks_are_missing() {
 async fn transaction_query_some_blocks_are_missing() {
     let assert_transaction_and_output = |data: Vec<FullTransaction>| {
         let len = data.len();
-        assert!(len == (BLOCKS_DELTA as usize * NUM_TXS_PER_BLOCK));
+        assert!(len == (BLOCKS_DELTA * NUM_TXS_PER_BLOCK));
         for (i, FullTransaction { transaction, transaction_output, transaction_hash }) in
             data.into_iter().enumerate()
         {
             assert_eq!(
                 transaction,
-                TXS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize]
-                    [i % NUM_TXS_PER_BLOCK]
+                TXS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS - BLOCKS_DELTA][i % NUM_TXS_PER_BLOCK]
             );
             assert_eq!(
                 transaction_output,
-                TX_OUTPUTS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize]
+                TX_OUTPUTS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS - BLOCKS_DELTA]
                     [i % NUM_TXS_PER_BLOCK]
             );
             assert_eq!(
                 transaction_hash,
-                TX_HASHES[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize]
+                TX_HASHES[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS - BLOCKS_DELTA]
                     [i % NUM_TXS_PER_BLOCK]
             );
         }
```
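The index arithmetic in these assertions maps a flat enumeration of returned transactions back to (block, transaction-in-block) coordinates via division and remainder, offset by the first block the query covers. A small self-check of that mapping (constants as in the file):

```rust
// Self-check of the (block, tx) index mapping used by the assertions above.
fn main() {
    const NUM_OF_BLOCKS: usize = 10;
    const NUM_TXS_PER_BLOCK: usize = 5;
    const BLOCKS_DELTA: usize = 5;

    for i in 0..(BLOCKS_DELTA * NUM_TXS_PER_BLOCK) {
        // The query starts at the first "missing" block.
        let block = i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS - BLOCKS_DELTA;
        let tx = i % NUM_TXS_PER_BLOCK;
        assert!((NUM_OF_BLOCKS - BLOCKS_DELTA..NUM_OF_BLOCKS).contains(&block));
        assert!(tx < NUM_TXS_PER_BLOCK);
    }
    println!("index mapping holds");
}
```
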
```diff
@@ -229,13 +228,11 @@ async fn state_diff_query_some_blocks_are_missing() {
     let assert_state_diff_chunk = |data: Vec<StateDiffChunk>| {
         // create_random_state_diff creates a state diff with 5 chunks.
         const STATE_DIFF_CHUNK_PER_BLOCK: usize = 5;
-        assert_eq!(data.len(), BLOCKS_DELTA as usize * STATE_DIFF_CHUNK_PER_BLOCK);
+        assert_eq!(data.len(), BLOCKS_DELTA * STATE_DIFF_CHUNK_PER_BLOCK);
         for (i, data) in data.into_iter().enumerate() {
             assert_eq!(
                 data,
-                STATE_DIFF_CHUNCKS[i
-                    + (NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize)
-                        * STATE_DIFF_CHUNK_PER_BLOCK]
+                STATE_DIFF_CHUNCKS[i + (NUM_OF_BLOCKS - BLOCKS_DELTA) * STATE_DIFF_CHUNK_PER_BLOCK]
             );
         }
     };
```
```diff
@@ -252,18 +249,17 @@ async fn state_diff_query_some_blocks_are_missing() {
 async fn event_query_some_blocks_are_missing() {
     let assert_event = |data: Vec<(Event, TransactionHash)>| {
         let len = data.len();
-        assert_eq!(len, BLOCKS_DELTA as usize * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
+        assert_eq!(len, BLOCKS_DELTA * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
         for (i, (event, tx_hash)) in data.into_iter().enumerate() {
             assert_eq!(
                 tx_hash,
-                TX_HASHES[i / (NUM_TXS_PER_BLOCK * EVENTS_PER_TX)
-                    + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize]
+                TX_HASHES[i / (NUM_TXS_PER_BLOCK * EVENTS_PER_TX) + (NUM_OF_BLOCKS - BLOCKS_DELTA)]
                     [i / EVENTS_PER_TX % NUM_TXS_PER_BLOCK]
             );
             assert_eq!(
                 event,
                 EVENTS[i / (NUM_TXS_PER_BLOCK * EVENTS_PER_TX)
-                    + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize
+                    + (NUM_OF_BLOCKS - BLOCKS_DELTA)
                     + i / EVENTS_PER_TX % NUM_TXS_PER_BLOCK]
             );
         }
```
```diff
@@ -282,12 +278,12 @@ async fn class_query_some_blocks_are_missing() {
     let assert_class = |data: Vec<(ApiContractClass, ClassHash)>| {
         // create_random_state_diff creates a state diff with 1 declared class
         // and 1 deprecated declared class
-        assert_eq!(data.len(), BLOCKS_DELTA as usize * 2);
+        assert_eq!(data.len(), BLOCKS_DELTA * 2);
         for (i, data) in data.iter().enumerate() {
             match data {
                 (ApiContractClass::ContractClass(contract_class), class_hash) => {
                     let (expected_class_hash, expected_contract_class) =
-                        &CLASSES_WITH_HASHES[i / 2 + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize][0];
+                        &CLASSES_WITH_HASHES[i / 2 + NUM_OF_BLOCKS - BLOCKS_DELTA][0];
                     assert_eq!(contract_class, expected_contract_class);
                     assert_eq!(class_hash, expected_class_hash);
                 }
@@ -296,8 +292,7 @@ async fn class_query_some_blocks_are_missing() {
                     class_hash,
                 ) => {
                     let (expected_class_hash, expected_contract_class) =
-                        &DEPRECATED_CLASSES_WITH_HASHES
-                            [i / 2 + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize][0];
+                        &DEPRECATED_CLASSES_WITH_HASHES[i / 2 + NUM_OF_BLOCKS - BLOCKS_DELTA][0];
                     assert_eq!(deprecated_contract_class, expected_contract_class);
                     assert_eq!(class_hash, expected_class_hash);
                 }
```
```diff
@@ -314,7 +309,7 @@ async fn class_query_some_blocks_are_missing() {
 
 async fn run_test<T, F, TQuery>(
     assert_fn: F,
-    start_block_number: u64,
+    start_block_number: usize,
     start_block_type: StartBlockType,
 ) where
     T: FetchBlockDataFromDb + std::fmt::Debug + PartialEq + Send + Sync + 'static,
```
```diff
@@ -342,21 +337,27 @@ async fn run_test<T, F, TQuery>(
     // put some data in the storage.
     insert_to_storage_test_blocks_up_to(&mut storage_writer);
 
+    let block_number = BlockNumber(start_block_number.try_into().unwrap());
     let start_block = match start_block_type {
         StartBlockType::Hash => BlockHashOrNumber::Hash(
             storage_reader
                 .begin_ro_txn()
                 .unwrap()
-                .get_block_header(BlockNumber(start_block_number))
+                .get_block_header(block_number)
                 .unwrap()
                 .unwrap()
                 .block_hash,
         ),
-        StartBlockType::Number => BlockHashOrNumber::Number(BlockNumber(start_block_number)),
+        StartBlockType::Number => BlockHashOrNumber::Number(block_number),
     };
 
     // register a query.
-    let query = Query { start_block, direction: Direction::Forward, limit: NUM_OF_BLOCKS, step: 1 };
+    let query = Query {
+        start_block,
+        direction: Direction::Forward,
+        limit: NUM_OF_BLOCKS.try_into().unwrap(),
+        step: 1,
+    };
     let query = TQuery::from(query);
     let (server_query_manager, _report_sender, response_reciever) =
         create_test_server_query_manager(query);
```
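The new `block_number` binding hoists the `usize` to `BlockNumber` conversion to one place instead of wrapping `start_block_number` in each match arm. A minimal sketch of that refactor (types simplified, names illustrative):

```rust
// Minimal sketch, types simplified: convert to the domain type once at the
// function boundary, then reuse the binding everywhere.
#[derive(Clone, Copy, Debug)]
struct BlockNumber(u64);

fn run(start_block_number: usize) {
    // One fallible conversion, then plain reuse in every branch below.
    let block_number = BlockNumber(start_block_number.try_into().unwrap());
    let by_number = block_number;
    let by_hash_lookup = block_number;
    println!("{by_number:?} {by_hash_lookup:?}");
}

fn main() {
    run(5);
}
```
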
```diff
@@ -424,38 +425,38 @@ fn setup() -> TestArgs {
 use starknet_api::core::ClassHash;
 fn insert_to_storage_test_blocks_up_to(storage_writer: &mut StorageWriter) {
     for i in 0..NUM_OF_BLOCKS {
-        let i_usize = usize::try_from(i).unwrap();
+        let block_number = BlockNumber(i.try_into().unwrap());
         let block_header = BlockHeader {
             block_hash: BlockHash(random::<u64>().into()),
             block_header_without_hash: BlockHeaderWithoutHash {
-                block_number: BlockNumber(i),
+                block_number,
                 ..Default::default()
             },
             ..Default::default()
         };
-        let classes_with_hashes = CLASSES_WITH_HASHES[i_usize]
+        let classes_with_hashes = CLASSES_WITH_HASHES[i]
             .iter()
             .map(|(class_hash, contract_class)| (*class_hash, contract_class))
             .collect::<Vec<_>>();
-        let deprecated_classes_with_hashes = DEPRECATED_CLASSES_WITH_HASHES[i_usize]
+        let deprecated_classes_with_hashes = DEPRECATED_CLASSES_WITH_HASHES[i]
             .iter()
             .map(|(class_hash, contract_class)| (*class_hash, contract_class))
             .collect::<Vec<_>>();
         storage_writer
             .begin_rw_txn()
             .unwrap()
-            .append_header(BlockNumber(i), &block_header)
+            .append_header(block_number, &block_header)
             .unwrap()
             // TODO(shahak): Put different signatures for each block to test that we retrieve the
             // right signatures.
-            .append_block_signature(BlockNumber(i), &BlockSignature::default())
+            .append_block_signature(block_number, &BlockSignature::default())
             .unwrap()
-            .append_state_diff(BlockNumber(i), THIN_STATE_DIFFS[i_usize].clone())
+            .append_state_diff(block_number, THIN_STATE_DIFFS[i].clone())
             .unwrap()
-            .append_body(BlockNumber(i), BlockBody{transactions: TXS[i_usize].clone(),
-                transaction_outputs: TX_OUTPUTS[i_usize].clone(),
-                transaction_hashes: TX_HASHES[i_usize].clone(),}).unwrap()
-            .append_classes(BlockNumber(i), &classes_with_hashes, &deprecated_classes_with_hashes)
+            .append_body(block_number, BlockBody{transactions: TXS[i].clone(),
+                transaction_outputs: TX_OUTPUTS[i].clone(),
+                transaction_hashes: TX_HASHES[i].clone(),}).unwrap()
+            .append_classes(block_number, &classes_with_hashes, &deprecated_classes_with_hashes)
             .unwrap()
             .commit()
             .unwrap();
```
```diff
@@ -470,7 +471,7 @@ lazy_static! {
     static ref STATE_DIFF_CHUNCKS: Vec<StateDiffChunk> =
         THIN_STATE_DIFFS.iter().flat_map(|diff| split_thin_state_diff(diff.clone())).collect();
     static ref BODY: BlockBody =
-        get_test_body(NUM_OF_BLOCKS as usize * NUM_TXS_PER_BLOCK, Some(EVENTS_PER_TX), None, None);
+        get_test_body(NUM_OF_BLOCKS * NUM_TXS_PER_BLOCK, Some(EVENTS_PER_TX), None, None);
     static ref TXS: Vec<Vec<Transaction>> =
         BODY.clone().transactions.chunks(NUM_TXS_PER_BLOCK).map(|chunk| chunk.to_vec()).collect();
     static ref TX_OUTPUTS: Vec<Vec<TransactionOutput>> = BODY
```