Skip to content

Commit

Permalink
chore: add lints to rpc (#1852)
Browse files Browse the repository at this point in the history
Lior banned `as` casts repo-wide, except where absolutely necessary.

Nearly all `as` uses can be replaced with `From`/`TryFrom`, which, unlike `as`,
perform no implicit coercions (we encountered several bugs due to these coercions).

Motivation: we are standardizing lints across the repo and CI,
instead of each crate having separate sets of lints.

Co-Authored-By: Gilad Chase <gilad@starkware.com>
  • Loading branch information
giladchase and Gilad Chase authored Nov 8, 2024
1 parent 93dd596 commit 30a968f
Show file tree
Hide file tree
Showing 4 changed files with 16 additions and 10 deletions.
6 changes: 2 additions & 4 deletions crates/papyrus_rpc/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,5 @@ starknet_client = { workspace = true, features = ["testing"] }
strum.workspace = true
strum_macros.workspace = true

[lints.rust]
# See [here](https://github.com/taiki-e/cargo-llvm-cov/issues/370) for a discussion on why this is
# needed (from rust 1.80).
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage_nightly)'] }
[lints]
workspace = true
9 changes: 6 additions & 3 deletions crates/papyrus_rpc/src/v0_8/api/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2811,7 +2811,7 @@ async fn test_get_events(
let mut parent_hash = BlockHash(felt!(GENESIS_HASH));
let mut rw_txn = storage_writer.begin_rw_txn().unwrap();
for (i, block_metadata) in block_metadatas.iter().enumerate() {
let block_number = BlockNumber(i as u64);
let block_number = BlockNumber(u64::try_from(i).expect("usize should fit in u64"));
let block = block_metadata.generate_block(&mut rng, parent_hash, block_number);

parent_hash = block.header.block_hash;
Expand Down Expand Up @@ -2863,7 +2863,10 @@ async fn test_get_events(
event_index_to_event.insert(
EventIndex(
TransactionIndex(
BlockNumber(block_metadatas.len() as u64),
BlockNumber(
u64::try_from(block_metadatas.len())
.expect("usize should fit in u64"),
),
TransactionOffsetInBlock(i_transaction),
),
EventIndexInTransactionOutput(i_event),
Expand Down Expand Up @@ -3336,7 +3339,7 @@ async fn get_events_page_size_too_big() {
async fn get_events_too_many_keys() {
let (module, _) = get_test_rpc_server_and_storage_writer::<JsonRpcServerImpl>();
let keys = (0..get_test_rpc_config().max_events_keys + 1)
.map(|i| HashSet::from([EventKey(Felt::from(i as u128))]))
.map(|i| HashSet::from([EventKey(Felt::from(u128::try_from(i).unwrap()))]))
.collect();

// Create the filter.
Expand Down
8 changes: 6 additions & 2 deletions crates/papyrus_rpc/src/v0_8/execution_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1515,7 +1515,7 @@ fn get_calldata_for_test_execution_info(
expected_transaction_version,
expected_caller_address,
expected_max_fee,
felt!(expected_signature.len() as u64),
felt!(u64::try_from(expected_signature.len()).unwrap()),
],
expected_signature,
vec![
Expand All @@ -1540,7 +1540,11 @@ fn get_calldata_for_test_execution_info(

Calldata(Arc::new(
[
vec![*CONTRACT_ADDRESS.0.key(), entry_point_selector.0, felt!(calldata.len() as u64)],
vec![
*CONTRACT_ADDRESS.0.key(),
entry_point_selector.0,
felt!(u64::try_from(calldata.len()).unwrap()),
],
calldata,
]
.iter()
Expand Down
3 changes: 2 additions & 1 deletion crates/papyrus_rpc/src/v0_8/transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1236,7 +1236,8 @@ fn l1_handler_message_hash(
let to_address = Token::Bytes(contract_address.0.key().to_bytes_be().to_vec());
let nonce = Token::Bytes(nonce.to_bytes_be().to_vec());
let selector = Token::Bytes(entry_point_selector.0.to_bytes_be().to_vec());
let payload_length_as_felt = Felt::from(payload.len() as u64);
let payload_length_as_felt =
Felt::from(u64::try_from(payload.len()).expect("usize should fit in u64"));
let payload_length = Token::Bytes(payload_length_as_felt.to_bytes_be().to_vec());

let mut payload: Vec<_> =
Expand Down

0 comments on commit 30a968f

Please sign in to comment.