From 5066da18ae341608dba454371b702d1ace2baef0 Mon Sep 17 00:00:00 2001
From: Gilad Chase
Date: Tue, 5 Nov 2024 16:02:33 +0200
Subject: [PATCH] chore: start supporting workspace lints in p2p

Remove `as` usages before adding `workspace.lints` support.
Part 1 out of 2 for this crate.

Context: Lior banned `as` repo-wide unless absolutely necessary. Nearly
all `as` uses can be replaced with `From`/`TryFrom`, which, unlike `as`,
perform no implicit coercions (we encountered several bugs caused by
those coercions).

Motivation: we are standardizing lints across the repo and CI, instead
of each crate carrying its own separate set of lints.

Changes:
1. header/state_diff: added a clippy allow on the `gauge!` metric calls,
   which require `f64`. This is probably avoidable, since the metrics
   used are integer values, but fixing that is out of scope here, and
   there is no other sensible conversion from unsigned integers to
   floats (`f64: From<u64>` does not exist because the conversion can
   lose precision).
2. test.rs: most uses of `NUM_OF_BLOCKS` and `BLOCKS_DELTA` were as
   `usize`, and only a couple needed `u64`, so I flipped their types to
   `usize` and convert at the few `u64` use sites.
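To illustrate the coercion hazard that motivated the ban (a sketch for
this message, not code from the crate):

    fn main() {
        // `as` silently wraps on narrowing conversions; `TryFrom`
        // surfaces the failure instead of producing a wrong value.
        let big: u64 = u64::from(u32::MAX) + 1;
        assert_eq!(big as u32, 0); // silently truncated
        assert!(u32::try_from(big).is_err()); // failure is explicit

        // The `gauge!` calls are the exception: `f64: From<u64>` does
        // not exist (values above 2^53 lose precision), so `as f64`
        // stays, scoped under an allow.
        #[allow(clippy::as_conversions)]
        let gauge_value = big as f64;
        assert_eq!(gauge_value, 4_294_967_296.0);
    }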
---
 crates/papyrus_p2p_sync/src/client/header.rs     |  2 ++
 .../papyrus_p2p_sync/src/client/state_diff.rs    |  1 +
 crates/papyrus_p2p_sync/src/server/test.rs       | 83 ++++++++++---------
 3 files changed, 45 insertions(+), 41 deletions(-)

diff --git a/crates/papyrus_p2p_sync/src/client/header.rs b/crates/papyrus_p2p_sync/src/client/header.rs
index 2b034a05ab..93f04c1360 100644
--- a/crates/papyrus_p2p_sync/src/client/header.rs
+++ b/crates/papyrus_p2p_sync/src/client/header.rs
@@ -14,6 +14,8 @@ use super::stream_builder::{BlockData, BlockNumberLimit, DataStreamBuilder};
 use super::{P2PSyncClientError, ALLOWED_SIGNATURES_LENGTH, NETWORK_DATA_TIMEOUT};
 
 impl BlockData for SignedBlockHeader {
+
+    #[allow(clippy::as_conversions)] // FIXME: use int metrics so `as f64` may be removed.
     fn write_to_storage(
         self: Box<Self>,
         storage_writer: &mut StorageWriter,
diff --git a/crates/papyrus_p2p_sync/src/client/state_diff.rs b/crates/papyrus_p2p_sync/src/client/state_diff.rs
index e682ddec71..c722dd0866 100644
--- a/crates/papyrus_p2p_sync/src/client/state_diff.rs
+++ b/crates/papyrus_p2p_sync/src/client/state_diff.rs
@@ -18,6 +18,7 @@ use crate::client::{P2PSyncClientError, NETWORK_DATA_TIMEOUT};
 
 impl BlockData for (ThinStateDiff, BlockNumber) {
     #[latency_histogram("p2p_sync_state_diff_write_to_storage_latency_seconds", true)]
+    #[allow(clippy::as_conversions)] // FIXME: use int metrics so `as f64` may be removed.
     fn write_to_storage(
         self: Box<Self>,
         storage_writer: &mut StorageWriter,
diff --git a/crates/papyrus_p2p_sync/src/server/test.rs b/crates/papyrus_p2p_sync/src/server/test.rs
index 8f319bad5c..01e324b517 100644
--- a/crates/papyrus_p2p_sync/src/server/test.rs
+++ b/crates/papyrus_p2p_sync/src/server/test.rs
@@ -53,10 +53,10 @@ use starknet_api::transaction::{
 use super::{split_thin_state_diff, FetchBlockDataFromDb, P2PSyncServer, P2PSyncServerChannels};
 use crate::server::register_query;
 const BUFFER_SIZE: usize = 10;
-const NUM_OF_BLOCKS: u64 = 10;
+const NUM_OF_BLOCKS: usize = 10;
 const NUM_TXS_PER_BLOCK: usize = 5;
 const EVENTS_PER_TX: usize = 2;
-const BLOCKS_DELTA: u64 = 5;
+const BLOCKS_DELTA: usize = 5;
 
 enum StartBlockType {
     Hash,
@@ -68,11 +68,11 @@ async fn header_query_positive_flow() {
 
     let assert_signed_block_header = |data: Vec<SignedBlockHeader>| {
         let len = data.len();
-        assert!(len == NUM_OF_BLOCKS as usize);
+        assert_eq!(len, NUM_OF_BLOCKS);
         for (i, signed_header) in data.into_iter().enumerate() {
             assert_eq!(
                 signed_header.block_header.block_header_without_hash.block_number.0,
-                i as u64
+                u64::try_from(i).unwrap()
             );
         }
     };
@@ -85,7 +85,7 @@ async fn transaction_query_positive_flow() {
 
     let assert_transaction_and_output = |data: Vec<FullTransaction>| {
         let len = data.len();
-        assert_eq!(len, NUM_OF_BLOCKS as usize * NUM_TXS_PER_BLOCK);
+        assert_eq!(len, NUM_OF_BLOCKS * NUM_TXS_PER_BLOCK);
         for (i, FullTransaction { transaction, transaction_output, transaction_hash }) in
             data.into_iter().enumerate()
         {
@@ -120,7 +120,7 @@ async fn state_diff_query_positive_flow() {
 #[tokio::test]
 async fn event_query_positive_flow() {
     let assert_event = |data: Vec<(Event, TransactionHash)>| {
-        assert_eq!(data.len(), NUM_OF_BLOCKS as usize * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
+        assert_eq!(data.len(), NUM_OF_BLOCKS * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
         for (i, (event, tx_hash)) in data.into_iter().enumerate() {
             assert_eq!(
                 tx_hash,
@@ -173,11 +173,11 @@ async fn class_query_positive_flow() {
 async fn header_query_some_blocks_are_missing() {
     let assert_signed_block_header = |data: Vec<SignedBlockHeader>| {
         let len = data.len();
-        assert!(len == BLOCKS_DELTA as usize);
+        assert!(len == BLOCKS_DELTA);
         for (i, signed_header) in data.into_iter().enumerate() {
             assert_eq!(
                 signed_header.block_header.block_header_without_hash.block_number.0,
-                i as u64 + NUM_OF_BLOCKS - BLOCKS_DELTA
+                u64::try_from(i + NUM_OF_BLOCKS - BLOCKS_DELTA).unwrap()
             );
         }
     };
@@ -194,23 +193,22 @@ async fn header_query_some_blocks_are_missing() {
 async fn transaction_query_some_blocks_are_missing() {
     let assert_transaction_and_output = |data: Vec<FullTransaction>| {
         let len = data.len();
-        assert!(len == (BLOCKS_DELTA as usize * NUM_TXS_PER_BLOCK));
+        assert!(len == (BLOCKS_DELTA * NUM_TXS_PER_BLOCK));
         for (i, FullTransaction { transaction, transaction_output, transaction_hash }) in
             data.into_iter().enumerate()
         {
             assert_eq!(
                 transaction,
-                TXS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize]
-                    [i % NUM_TXS_PER_BLOCK]
+                TXS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS - BLOCKS_DELTA][i % NUM_TXS_PER_BLOCK]
             );
             assert_eq!(
                 transaction_output,
-                TX_OUTPUTS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize]
+                TX_OUTPUTS[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS - BLOCKS_DELTA]
                     [i % NUM_TXS_PER_BLOCK]
             );
             assert_eq!(
                 transaction_hash,
-                TX_HASHES[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize]
+                TX_HASHES[i / NUM_TXS_PER_BLOCK + NUM_OF_BLOCKS - BLOCKS_DELTA]
                     [i % NUM_TXS_PER_BLOCK]
             );
         }
@@ -229,13 +228,11 @@ async fn state_diff_query_some_blocks_are_missing() {
     let assert_state_diff_chunk = |data: Vec<StateDiffChunk>| {
         // create_random_state_diff creates a state diff with 5 chunks.
         const STATE_DIFF_CHUNK_PER_BLOCK: usize = 5;
-        assert_eq!(data.len(), BLOCKS_DELTA as usize * STATE_DIFF_CHUNK_PER_BLOCK);
+        assert_eq!(data.len(), BLOCKS_DELTA * STATE_DIFF_CHUNK_PER_BLOCK);
         for (i, data) in data.into_iter().enumerate() {
             assert_eq!(
                 data,
-                STATE_DIFF_CHUNCKS[i
-                    + (NUM_OF_BLOCKS as usize - BLOCKS_DELTA as usize)
-                        * STATE_DIFF_CHUNK_PER_BLOCK]
+                STATE_DIFF_CHUNCKS[i + (NUM_OF_BLOCKS - BLOCKS_DELTA) * STATE_DIFF_CHUNK_PER_BLOCK]
             );
         }
     };
@@ -252,18 +249,17 @@ async fn state_diff_query_some_blocks_are_missing() {
 async fn event_query_some_blocks_are_missing() {
     let assert_event = |data: Vec<(Event, TransactionHash)>| {
         let len = data.len();
-        assert_eq!(len, BLOCKS_DELTA as usize * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
+        assert_eq!(len, BLOCKS_DELTA * NUM_TXS_PER_BLOCK * EVENTS_PER_TX);
         for (i, (event, tx_hash)) in data.into_iter().enumerate() {
             assert_eq!(
                 tx_hash,
-                TX_HASHES[i / (NUM_TXS_PER_BLOCK * EVENTS_PER_TX)
-                    + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize]
+                TX_HASHES[i / (NUM_TXS_PER_BLOCK * EVENTS_PER_TX) + (NUM_OF_BLOCKS - BLOCKS_DELTA)]
                     [i / EVENTS_PER_TX % NUM_TXS_PER_BLOCK]
             );
             assert_eq!(
                 event,
                 EVENTS[i / (NUM_TXS_PER_BLOCK * EVENTS_PER_TX)
-                    + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize
+                    + (NUM_OF_BLOCKS - BLOCKS_DELTA)
                     + i / EVENTS_PER_TX % NUM_TXS_PER_BLOCK]
             );
         }
@@ -282,12 +278,12 @@ async fn class_query_some_blocks_are_missing() {
     let assert_class = |data: Vec<(ApiContractClass, ClassHash)>| {
         // create_random_state_diff creates a state diff with 1 declared class
         // and 1 deprecated declared class
-        assert_eq!(data.len(), BLOCKS_DELTA as usize * 2);
+        assert_eq!(data.len(), BLOCKS_DELTA * 2);
         for (i, data) in data.iter().enumerate() {
             match data {
                 (ApiContractClass::ContractClass(contract_class), class_hash) => {
                     let (expected_class_hash, expected_contract_class) =
-                        &CLASSES_WITH_HASHES[i / 2 + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize][0];
+                        &CLASSES_WITH_HASHES[i / 2 + NUM_OF_BLOCKS - BLOCKS_DELTA][0];
                     assert_eq!(contract_class, expected_contract_class);
                     assert_eq!(class_hash, expected_class_hash);
                 }
@@ -296,8 +292,7 @@ async fn class_query_some_blocks_are_missing() {
                     class_hash,
                 ) => {
                     let (expected_class_hash, expected_contract_class) =
-                        &DEPRECATED_CLASSES_WITH_HASHES
-                            [i / 2 + (NUM_OF_BLOCKS - BLOCKS_DELTA) as usize][0];
+                        &DEPRECATED_CLASSES_WITH_HASHES[i / 2 + NUM_OF_BLOCKS - BLOCKS_DELTA][0];
                     assert_eq!(deprecated_contract_class, expected_contract_class);
                     assert_eq!(class_hash, expected_class_hash);
                 }
@@ -314,7 +309,7 @@ async fn class_query_some_blocks_are_missing() {
 
 async fn run_test<T, F>(
     assert_fn: F,
-    start_block_number: u64,
+    start_block_number: usize,
     start_block_type: StartBlockType,
 ) where
     T: FetchBlockDataFromDb + std::fmt::Debug + PartialEq + Send + Sync + 'static,
@@ -342,21 +337,27 @@ async fn run_test<T, F>(
     // put some data in the storage.
     insert_to_storage_test_blocks_up_to(&mut storage_writer);
 
+    let block_number = BlockNumber(start_block_number.try_into().unwrap());
     let start_block = match start_block_type {
         StartBlockType::Hash => BlockHashOrNumber::Hash(
             storage_reader
                 .begin_ro_txn()
                 .unwrap()
-                .get_block_header(BlockNumber(start_block_number))
+                .get_block_header(block_number)
                 .unwrap()
                 .unwrap()
                 .block_hash,
         ),
-        StartBlockType::Number => BlockHashOrNumber::Number(BlockNumber(start_block_number)),
+        StartBlockType::Number => BlockHashOrNumber::Number(block_number),
     };
 
     // register a query.
-    let query = Query { start_block, direction: Direction::Forward, limit: NUM_OF_BLOCKS, step: 1 };
+    let query = Query {
+        start_block,
+        direction: Direction::Forward,
+        limit: NUM_OF_BLOCKS.try_into().unwrap(),
+        step: 1,
+    };
     let query = TQuery::from(query);
     let (server_query_manager, _report_sender, response_reciever) =
         create_test_server_query_manager(query);
@@ -424,38 +425,38 @@ fn setup() -> TestArgs {
 use starknet_api::core::ClassHash;
 fn insert_to_storage_test_blocks_up_to(storage_writer: &mut StorageWriter) {
     for i in 0..NUM_OF_BLOCKS {
-        let i_usize = usize::try_from(i).unwrap();
+        let block_number = BlockNumber(i.try_into().unwrap());
         let block_header = BlockHeader {
             block_hash: BlockHash(random::<u64>().into()),
             block_header_without_hash: BlockHeaderWithoutHash {
-                block_number: BlockNumber(i),
+                block_number,
                 ..Default::default()
             },
             ..Default::default()
         };
-        let classes_with_hashes = CLASSES_WITH_HASHES[i_usize]
+        let classes_with_hashes = CLASSES_WITH_HASHES[i]
             .iter()
             .map(|(class_hash, contract_class)| (*class_hash, contract_class))
             .collect::<Vec<_>>();
-        let deprecated_classes_with_hashes = DEPRECATED_CLASSES_WITH_HASHES[i_usize]
+        let deprecated_classes_with_hashes = DEPRECATED_CLASSES_WITH_HASHES[i]
             .iter()
             .map(|(class_hash, contract_class)| (*class_hash, contract_class))
             .collect::<Vec<_>>();
         storage_writer
             .begin_rw_txn()
             .unwrap()
-            .append_header(BlockNumber(i), &block_header)
+            .append_header(block_number, &block_header)
             .unwrap()
             // TODO(shahak): Put different signatures for each block to test that we retrieve the
             // right signatures.
-            .append_block_signature(BlockNumber(i), &BlockSignature::default())
+            .append_block_signature(block_number, &BlockSignature::default())
             .unwrap()
-            .append_state_diff(BlockNumber(i), THIN_STATE_DIFFS[i_usize].clone())
+            .append_state_diff(block_number, THIN_STATE_DIFFS[i].clone())
             .unwrap()
-            .append_body(BlockNumber(i), BlockBody{transactions: TXS[i_usize].clone(),
-                transaction_outputs: TX_OUTPUTS[i_usize].clone(),
-                transaction_hashes: TX_HASHES[i_usize].clone(),}).unwrap()
-            .append_classes(BlockNumber(i), &classes_with_hashes, &deprecated_classes_with_hashes)
+            .append_body(block_number, BlockBody{transactions: TXS[i].clone(),
+                transaction_outputs: TX_OUTPUTS[i].clone(),
+                transaction_hashes: TX_HASHES[i].clone(),}).unwrap()
+            .append_classes(block_number, &classes_with_hashes, &deprecated_classes_with_hashes)
             .unwrap()
             .commit()
             .unwrap();
@@ -470,7 +471,7 @@ lazy_static! {
     static ref STATE_DIFF_CHUNCKS: Vec<StateDiffChunk> =
         THIN_STATE_DIFFS.iter().flat_map(|diff| split_thin_state_diff(diff.clone())).collect();
     static ref BODY: BlockBody =
-        get_test_body(NUM_OF_BLOCKS as usize * NUM_TXS_PER_BLOCK, Some(EVENTS_PER_TX), None, None);
+        get_test_body(NUM_OF_BLOCKS * NUM_TXS_PER_BLOCK, Some(EVENTS_PER_TX), None, None);
     static ref TXS: Vec<Vec<Transaction>> =
         BODY.clone().transactions.chunks(NUM_TXS_PER_BLOCK).map(|chunk| chunk.to_vec()).collect();
     static ref TX_OUTPUTS: Vec<Vec<TransactionOutput>> = BODY