diff --git a/.github/workflows/rust-sdk-tests.yml b/.github/workflows/rust-sdk-tests.yml new file mode 100644 index 0000000..49ad890 --- /dev/null +++ b/.github/workflows/rust-sdk-tests.yml @@ -0,0 +1,21 @@ +name: Rust SDK CI +on: + workflow_dispatch: + push: + +env: + CARGO_TERM_COLOR: always + +jobs: + rust-sdk-ci: + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - uses: Swatinem/rust-cache@v2 + - name: Set up Rust PATH + run: echo "$HOME/.cargo/bin:$PATH" >> $GITHUB_PATH + - name: Lint rust + run: make check-lint + - name: Test rust + run: make test diff --git a/.gitignore b/.gitignore index 6985cf1..1a48c92 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,5 @@ Cargo.lock # MSVC Windows builds of rustc generate these, which store debugging information *.pdb + +.idea/ diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..0f7e015 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "redstone" +version = "1.0.0" +edition = "2021" +authors = ["RedStone "] +description = "A Rust implementation of deserializing&decrypting RedStone payload" + +[features] +default = ["core", "network"] + +# A core functionality of the package. +core = ["sha3/asm"] + +# An interface for the network to be extended. +network = [] + +# An extension for casper network +network_casper = ["casper-contract/wee_alloc", "casper-types", "network"] + +# An extension for debug-printing of messages in the Casper extension. Not supported by Casper Contracts deployed to the network. +print_debug = ["casper-contract/test-support"] + +# A variant of decrypting the message-signers using secp256k1 library. Cheaper in runtime. +crypto_secp256k1 = ["secp256k1/recovery", "secp256k1/lowmemory", "secp256k1/alloc"] + +# A variant of decrypting the message-signers using k256 library. Cheaper during contract deployment. +crypto_k256 = ["k256/alloc", "k256/sha256", "k256/ecdsa"] + +# A set of helpers for testing & offline usage. 
+helpers = ["hex/serde", "hex/alloc", "network"] + +[dependencies] +casper-contract = { version = "^4.0.0", default-features = false, features = [], optional = true } +casper-types = { version = "^4.0.1", default-features = false, features = [], optional = true } +sha3 = { version = "^0.10.8", default-features = false, features = [], optional = true } +k256 = { version = "^0.13.3", default-features = false, features = [], optional = true } +secp256k1 = { version = "^0.29.0", default-features = false, features = [], optional = true } +hex = { version = "^0.4.3", default-features = false, features = [], optional = true } + +[dev-dependencies] +itertools = { version = "^0.12.1" } diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..aafe1e0 --- /dev/null +++ b/Makefile @@ -0,0 +1,40 @@ +CLIPPY=cargo clippy --release --fix --allow-dirty --allow-staged +DOC=cargo doc --no-deps --document-private-items +TEST=RUST_BACKTRACE=full cargo test --features="helpers" +FEATURE_SETS="crypto_k256" "crypto_k256,network_casper" "crypto_secp256k1" "crypto_secp256k1,network_casper" + + +prepare: + @rustup target add wasm32-unknown-unknown + +test: + @for features in $(FEATURE_SETS); do \ + echo "Running tests with features: $$features"; \ + ($(TEST) --features=$$features); \ + done + +docs: + @for features in $(FEATURE_SETS); do \ + echo "Documenting redstone with features: $$features"; \ + (rm -rf ./target/doc && $(DOC) --features=$$features && mkdir -p ../target/rust-docs/redstone && cp -r ../target/doc ../target/rust-docs/redstone/$$features); \ + done + +coverage: + cargo install grcov --version=0.5.15 + CARGO_INCREMENTAL=0 \ + RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" \ + RUSTDOCFLAGS="-Cpanic=abort" cargo build --features="crypto_k256" + CARGO_INCREMENTAL=0 \ + RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" \ + RUSTDOCFLAGS="-Cpanic=abort" $(TEST) --features="crypto_k256" + +clippy: prepare + @for features in $(FEATURE_SETS); do \ + ($(CLIPPY) --all-targets --features=$$features -- -D warnings); \ + done + +check-lint: clippy + cargo fmt -- --check + +lint: clippy + cargo fmt diff --git a/README.md b/README.md index 183e6d4..f5d9db3 100644 --- a/README.md +++ b/README.md @@ -1 +1,6 @@ -# rust-sdk \ No newline at end of file +# RedStone + +## See autogenerated docs for: + +1. 
[`redstone` crate with `crypto_secp256k1` and `network_casper`](https://redstone-docs-git-casper-redstone-finance.vercel.app/rust/casper/redstone/crypto_secp256k1,network_casper/redstone/index.html) + features (the base version for [contracts](../contracts/README.md)) diff --git a/sample-data/payload.hex b/sample-data/payload.hex new file mode 100644 index 0000000..b248a10 --- /dev/null +++ b/sample-data/payload.hex @@ -0,0 +1 @@ +45544800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003709f35687018d8378f9800000002000000139c69986b10617291f07fe420bd9991de3aca4dd4c1a7e77f075aebbe56221a846649b1083773c945ae89c0074331037eba52b7f57dc634426099a7ec63f45711b45544800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003709f2ff20018d8378f980000000200000017dcdb6561923ebea544f9cd51aa32a1d37eba0960bfa9121cef9cdedd3135c88009e6c2e761492d14a4674ff3958a3f1c7b3e7c14b9bc08e107d8bab0a8f5a3d1c45544800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003709ed5659018d8378f980000000200000015687773e177cd39b410c06f25bd61e52abe8d8f8dffba822168eb06d9ac86fb947d83779cb9437f74e21359ed09568284da76d92351ee9a80df27ea4e6c8e5cc1b45544800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003709ebaa55018d8378f98000000020000001f321c0ca3703cad49fe373d98a4cceba1fad5172fdb0adb47877277328a869867d1d10c8e69e762cf6035983c0504423b2389bd24638c910c16467c4b9c4c4521b455448000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000037094b8cc2018d8378f98000000020000001c9fd2f5f1f01612a41c9337e3c1939050284dcc72b091cd4503d75913a38b7773d3bec57a07d95468b29c42504b2bec4546b655a33adbc062eeeeddf275986bb1c4254430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e7316ea122018d8378f9800000002000000156588a1f87423c2675eb6f780b73649beb98ee062fb732ef1521e1b3e6ed366e7c584eab98215a7fb8b0cc136fde92f0f8f76fb49dd8534f44fbaab00b7a7be41b4254430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e73180bb79018d8378f98000000020000001d14781f831df36e6529deb617d17887994092ac5ea7e0ea9c2f12ab84fc6a2952f48411f309aad4b16cc698a3c6dd60a5c998771fca4d08397d84948e6daff721c4254430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e730b34178018d8378f980000000200000017f092d8108b516db8b163b382eb9d828848fe636bb433c8d0420e2451c6a960f5b1c224394b1cba681658c219b208fca2d06d90fa943e625772a3528c5d857d81c4254430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e730580d76018d8378f9800000002000000159dda68806f9c64b9c93c2bb21d079cf3f1d07cae51da8173f9552fa2bcd3cf73758c0e46b1eecc46d12f1876cfbda3360cb1a25a11a38ef21c5f9e539c27c981b4254430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e7318b8ad3018d8378f98000000020000001b4f2c86c5a1300c4f001a6878525afc23f9fe1aefbab7861439fd613f839d04a39caf6a037d4bb3996b90e98c2d2d850691eefe3f67e502f6278aa0a652c2e8a1b415641580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000cbc887e2018d8378f980000000200000011001f8291b8b722e939f5aea0a436375d451d79ff5c1e248d2857f8fe341db007d8692a67c0bcf777f624c06da5b16ce30ec03b2fcbd301a3daec708eab215
961c415641580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000cbc8855a018d8378f980000000200000019ae0e0a4ceb48265b808e42ad2463984f6b4262dacbe34d6d085734177682d7474effb1cc22327b9f4ed31862c8d19868101b6dbed7f0d9579e22824cbe420421b415641580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000cbc85b72018d8378f980000000200000017f05693fd3b0b4cae6c9c55507ae69a1682f63c2854215198400549777c7a4fa13d632296e7db0f24de80a4548bc9dfec8c4b82a954294f7cb8295e88006164b1b415641580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000cbc84f11018d8378f98000000020000001722670ba61759fd47cb5eeceeda2c69695c2d3a633784af475d9a5c682d03f74250d4de708849ae0fc6eb6d043fa27be75357ace22aef071705d5f1bcbbac7f51b415641580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000cbc8878f018d8378f98000000020000001d31cf023168e23772053a48dfbade05535bd2348d6a5e4b42516206037adbe715e8f272703763169a17b65073b88ab5a4201e514d8436f6e169114cf53c709521b000f000000000002ed57011e0000 \ No newline at end of file diff --git a/src/core/aggregator.rs b/src/core/aggregator.rs new file mode 100644 index 0000000..7f1647a --- /dev/null +++ b/src/core/aggregator.rs @@ -0,0 +1,291 @@ +use crate::{ + core::{config::Config, validator::Validator}, + network::specific::U256, + print_debug, + protocol::data_package::DataPackage, + utils::median::Median, +}; + +type Matrix = Vec>>; + +/// Aggregates values from a collection of data packages according to the provided configuration. +/// +/// This function takes a configuration and a vector of data packages, constructs a matrix of values +/// and their corresponding signers, and then aggregates these values based on the aggregation logic +/// defined in the provided configuration. The aggregation strategy could vary, for example, by taking +/// an average of the values, selecting the median, or applying a custom algorithm defined within the +/// `aggregate_matrix` function. +/// +/// The primary purpose of this function is to consolidate data from multiple sources into a coherent +/// and singular value set that adheres to the criteria specified in the `Config`. +/// +/// # Arguments +/// +/// * `config` - A `Config` instance containing settings and parameters used to guide the aggregation process. +/// * `data_packages` - A vector of `DataPackage` instances, each representing a set of values and associated +/// metadata collected from various sources or signers. +/// +/// # Returns +/// +/// Returns a `Vec`, which is a vector of aggregated values resulting from applying the aggregation +/// logic to the input data packages as per the specified configuration. Each `U256` value in the vector +/// represents an aggregated result derived from the corresponding data packages. +/// +/// # Note +/// +/// This function is internal to the crate (`pub(crate)`) and not exposed as part of the public API. It is +/// designed to be used by other components within the same crate that require value aggregation functionality. 
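+///
+/// # Illustration
+///
+/// An illustrative sketch based on the test data below, assuming two authorized signers and
+/// `feed_ids = [ETH, BTC]`; the intermediate value-signer matrix and the resulting medians are:
+///
+/// ```text
+///          signer #1   signer #2      aggregated (median)
+/// ETH   [  Some(11),   Some(13)  ] -> 12
+/// BTC   [  Some(21),   Some(23)  ] -> 22
+/// ```
+///
+/// Rows containing fewer than `signer_count_threshold` non-empty values cause the processing
+/// to revert (see `validate_signer_count_threshold`).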
+pub(crate) fn aggregate_values(config: Config, data_packages: Vec) -> Vec { + aggregate_matrix(make_value_signer_matrix(&config, data_packages), config) +} + +fn aggregate_matrix(matrix: Matrix, config: Config) -> Vec { + matrix + .iter() + .enumerate() + .map(|(index, values)| { + config + .validate_signer_count_threshold(index, values) + .median() + }) + .collect() +} + +fn make_value_signer_matrix(config: &Config, data_packages: Vec) -> Matrix { + let mut matrix = vec![vec![None; config.signers.len()]; config.feed_ids.len()]; + + data_packages.iter().for_each(|data_package| { + if let Some(signer_index) = config.signer_index(&data_package.signer_address) { + data_package.data_points.iter().for_each(|data_point| { + if let Some(feed_index) = config.feed_index(data_point.feed_id) { + matrix[feed_index][signer_index] = data_point.value.into() + } + }) + } + }); + + print_debug!("{:?}", matrix); + + matrix +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod aggregate_matrix_tests { + use crate::{ + core::{aggregator::aggregate_matrix, config::Config}, + helpers::iter_into::{IterInto, IterIntoOpt, OptIterIntoOpt}, + }; + + #[test] + fn test_aggregate_matrix() { + let matrix = vec![ + vec![11u8, 13].iter_into_opt(), + vec![21u8, 23].iter_into_opt(), + ]; + + for signer_count_threshold in 0..Config::test().signers.len() + 1 { + let mut config = Config::test(); + config.signer_count_threshold = signer_count_threshold as u8; + + let result = aggregate_matrix(matrix.clone(), config); + + assert_eq!(result, vec![12u8, 22].iter_into()); + } + } + + #[test] + fn test_aggregate_matrix_smaller_threshold_missing_one_value() { + let mut config = Config::test(); + config.signer_count_threshold = 1; + + let matrix = vec![ + vec![11u8, 13].iter_into_opt(), + vec![21u8.into(), None].opt_iter_into_opt(), + ]; + + let result = aggregate_matrix(matrix, config); + + assert_eq!(result, vec![12u8, 21].iter_into()); + } + + #[should_panic(expected = "Array is empty")] + #[test] + fn test_aggregate_matrix_smaller_threshold_missing_whole_feed() { + let mut config = Config::test(); + config.signer_count_threshold = 0; + + let matrix = vec![vec![11u8, 13].iter_into_opt(), vec![None; 2]]; + + aggregate_matrix(matrix, config); + } + + #[should_panic(expected = "Insufficient signer count 1 for #0 (ETH)")] + #[test] + fn test_aggregate_matrix_missing_one_value() { + let matrix = vec![ + vec![21u8.into(), None].opt_iter_into_opt(), + vec![11u8, 12].iter_into_opt(), + ]; + + aggregate_matrix(matrix, Config::test()); + } + + #[should_panic(expected = "Insufficient signer count 0 for #1 (BTC)")] + #[test] + fn test_aggregate_matrix_missing_whole_feed() { + let matrix = vec![vec![11u8, 13].iter_into_opt(), vec![None; 2]]; + + aggregate_matrix(matrix, Config::test()); + } +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod make_value_signer_matrix { + use crate::{ + core::{ + aggregator::{make_value_signer_matrix, Matrix}, + config::Config, + test_helpers::{AVAX, BTC, ETH, TEST_SIGNER_ADDRESS_1, TEST_SIGNER_ADDRESS_2}, + }, + helpers::iter_into::IterInto, + network::specific::U256, + protocol::data_package::DataPackage, + }; + + #[test] + fn test_make_value_signer_matrix_empty() { + let config = Config::test(); + + test_make_value_signer_matrix_of( + vec![], + vec![vec![None; config.signers.len()]; config.feed_ids.len()], + ); + } + + #[test] + fn test_make_value_signer_matrix_exact() { + let data_packages = vec![ + DataPackage::test(ETH, 11, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(ETH, 12, TEST_SIGNER_ADDRESS_2, 
None), + DataPackage::test(BTC, 22, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(BTC, 21, TEST_SIGNER_ADDRESS_1, None), + ]; + + test_make_value_signer_matrix_of( + data_packages, + vec![vec![11, 12].iter_into(), vec![21, 22].iter_into()], + ); + } + + #[test] + fn test_make_value_signer_matrix_greater() { + let data_packages = vec![ + DataPackage::test(ETH, 11, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(ETH, 12, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(BTC, 22, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(BTC, 21, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(AVAX, 31, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(AVAX, 32, TEST_SIGNER_ADDRESS_2, None), + ]; + + test_make_value_signer_matrix_of( + data_packages, + vec![vec![11, 12].iter_into(), vec![21, 22].iter_into()], + ); + } + + #[test] + fn test_make_value_signer_matrix_smaller() { + let data_packages = vec![ + DataPackage::test(ETH, 11, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(ETH, 12, TEST_SIGNER_ADDRESS_2, None), + ]; + + test_make_value_signer_matrix_of( + data_packages, + vec![vec![11, 12].iter_into(), vec![None; 2]], + ); + } + + #[test] + fn test_make_value_signer_matrix_diagonal() { + let data_packages = vec![ + DataPackage::test(BTC, 22, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(ETH, 11, TEST_SIGNER_ADDRESS_1, None), + ]; + + test_make_value_signer_matrix_of( + data_packages, + vec![vec![11.into(), None], vec![None, 22.into()]], + ); + } + + #[test] + fn test_make_value_signer_matrix_repetitions() { + let data_packages = vec![ + DataPackage::test(BTC, 21, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(BTC, 22, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(BTC, 202, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(ETH, 11, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(ETH, 101, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(ETH, 12, TEST_SIGNER_ADDRESS_2, None), + ]; + + test_make_value_signer_matrix_of( + data_packages, + vec![vec![101, 12].iter_into(), vec![21, 202].iter_into()], + ); + } + + #[test] + fn test_make_value_signer_matrix_all_wrong() { + let config = Config::test(); + + let data_packages = vec![ + DataPackage::test(AVAX, 32, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(AVAX, 31, TEST_SIGNER_ADDRESS_1, None), + ]; + + test_make_value_signer_matrix_of( + data_packages, + vec![vec![None; config.signers.len()]; config.feed_ids.len()], + ); + } + + #[test] + fn test_make_value_signer_matrix_mix() { + let data_packages = vec![ + DataPackage::test(ETH, 11, TEST_SIGNER_ADDRESS_1, None), + DataPackage::test(ETH, 12, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(AVAX, 32, TEST_SIGNER_ADDRESS_2, None), + DataPackage::test(AVAX, 31, TEST_SIGNER_ADDRESS_1, None), + ]; + + test_make_value_signer_matrix_of( + data_packages, + vec![vec![11, 12].iter_into(), vec![None; 2]], + ); + } + + fn test_make_value_signer_matrix_of( + data_packages: Vec, + expected_values: Vec>>, + ) { + let config = &Config::test(); + let result = make_value_signer_matrix(config, data_packages); + + let expected_matrix: Matrix = expected_values + .iter() + .map(|row| { + (row.iter() + .map(|&value| value.map(U256::from)) + .collect::>()) + .iter_into() as Vec> + }) + .collect(); + + assert_eq!(result, expected_matrix) + } +} diff --git a/src/core/config.rs b/src/core/config.rs new file mode 100644 index 0000000..50f5b45 --- /dev/null +++ b/src/core/config.rs @@ -0,0 +1,31 @@ +use crate::network::specific::{Bytes, U256}; + +/// Configuration for a RedStone payload processor. 
+///
+/// Specifies the parameters necessary for the verification and aggregation of values
+/// from various data points passed by the RedStone payload.
+#[derive(Debug)]
+pub struct Config {
+    /// The minimum number of signers required to validate the data.
+    ///
+    /// Specifies how many unique signers (from different addresses) are required
+    /// for the data to be considered valid and trustworthy.
+    pub signer_count_threshold: u8,
+
+    /// List of identifiers for signers authorized to sign the data.
+    ///
+    /// Each signer is identified by a unique, network-specific byte string (`Bytes`),
+    /// which represents their address.
+    pub signers: Vec<Bytes>,
+
+    /// Identifiers for the data feeds from which values are aggregated.
+    ///
+    /// Each data feed id is represented by the network-specific `U256` type.
+    pub feed_ids: Vec<U256>,
+
+    /// The current block time as a timestamp, used for verifying data timeliness.
+    ///
+    /// The value is expressed in milliseconds since the Unix epoch (January 1, 1970) and allows
+    /// for determining whether the data is current in the context of blockchain time.
+    pub block_timestamp: u64,
+}
diff --git a/src/core/mod.rs b/src/core/mod.rs
new file mode 100644
index 0000000..0867daa
--- /dev/null
+++ b/src/core/mod.rs
@@ -0,0 +1,10 @@
+pub mod config;
+pub mod processor;
+pub mod processor_result;
+
+mod aggregator;
+mod validator;
+
+#[cfg(feature = "helpers")]
+#[cfg(test)]
+mod test_helpers;
diff --git a/src/core/processor.rs b/src/core/processor.rs
new file mode 100644
index 0000000..ecd4428
--- /dev/null
+++ b/src/core/processor.rs
@@ -0,0 +1,101 @@
+use crate::{
+    core::{
+        aggregator::aggregate_values, config::Config, processor_result::ProcessorResult,
+        validator::Validator,
+    },
+    network::specific::Bytes,
+    print_debug,
+    protocol::payload::Payload,
+};
+
+/// The main processor of the RedStone payload.
+///
+/// # Arguments
+///
+/// * `config` - Configuration of the payload processing.
+/// * `payload_bytes` - Network-specific byte list of the payload to be processed.
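+///
+/// # Example
+///
+/// A minimal sketch, assuming the `helpers` feature for the hex utilities; the signer
+/// addresses and the block timestamp are illustrative placeholders taken from the test fixtures:
+///
+/// ```ignore
+/// use redstone::core::{config::Config, processor::process_payload};
+/// use redstone::helpers::hex::{hex_to_bytes, make_feed_ids, read_payload_bytes};
+///
+/// let config = Config {
+///     signer_count_threshold: 2,
+///     signers: vec![
+///         hex_to_bytes("1ea62d73edF8ac05dfcea1a34b9796e937a29eFF".into()).into(),
+///         hex_to_bytes("109b4a318a4f5ddcbca6349b45f881b4137deafb".into()).into(),
+///     ],
+///     feed_ids: make_feed_ids(vec!["ETH", "BTC"]),
+///     block_timestamp: 2000000000000, // milliseconds since the Unix epoch
+/// };
+///
+/// // Returns one aggregated value per entry of `config.feed_ids`, in the same order,
+/// // together with the minimal timestamp of the processed data packages.
+/// let result = process_payload(config, read_payload_bytes("./sample-data/payload.hex").into());
+/// ```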
+pub fn process_payload(config: Config, payload_bytes: Bytes) -> ProcessorResult { + #[allow(clippy::useless_conversion)] + let mut bytes: Vec = payload_bytes.into(); + let payload = Payload::make(&mut bytes); + print_debug!("{:?}", payload); + + make_processor_result(config, payload) +} + +fn make_processor_result(config: Config, payload: Payload) -> ProcessorResult { + let min_timestamp = payload + .data_packages + .iter() + .enumerate() + .map(|(index, dp)| config.validate_timestamp(index, dp.timestamp)) + .min() + .unwrap(); + + let values = aggregate_values(config, payload.data_packages); + + print_debug!("{} {:?}", min_timestamp, values); + + ProcessorResult { + values, + min_timestamp, + } +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{ + core::{ + config::Config, + processor::make_processor_result, + processor_result::ProcessorResult, + test_helpers::{ + BTC, ETH, TEST_BLOCK_TIMESTAMP, TEST_SIGNER_ADDRESS_1, TEST_SIGNER_ADDRESS_2, + }, + }, + helpers::iter_into::IterInto, + protocol::{data_package::DataPackage, payload::Payload}, + }; + + #[test] + fn test_make_processor_result() { + let data_packages = vec![ + DataPackage::test( + ETH, + 11, + TEST_SIGNER_ADDRESS_1, + (TEST_BLOCK_TIMESTAMP + 5).into(), + ), + DataPackage::test( + ETH, + 13, + TEST_SIGNER_ADDRESS_2, + (TEST_BLOCK_TIMESTAMP + 3).into(), + ), + DataPackage::test( + BTC, + 32, + TEST_SIGNER_ADDRESS_2, + (TEST_BLOCK_TIMESTAMP - 2).into(), + ), + DataPackage::test( + BTC, + 31, + TEST_SIGNER_ADDRESS_1, + (TEST_BLOCK_TIMESTAMP + 400).into(), + ), + ]; + + let result = make_processor_result(Config::test(), Payload { data_packages }); + + assert_eq!( + result, + ProcessorResult { + min_timestamp: TEST_BLOCK_TIMESTAMP - 2, + values: vec![12u8, 31].iter_into() + } + ); + } +} diff --git a/src/core/processor_result.rs b/src/core/processor_result.rs new file mode 100644 index 0000000..71671fe --- /dev/null +++ b/src/core/processor_result.rs @@ -0,0 +1,26 @@ +use crate::network::specific::U256; + +/// Represents the result of processing the RedStone payload. +/// +/// This structure is used to encapsulate the outcome of a RedStone payload processing operation, +/// particularly focusing on time-sensitive data and its associated values, according to the `Config`. +#[derive(Debug, Eq, PartialEq)] +pub struct ProcessorResult { + /// The minimum timestamp encountered during processing. + /// + /// This field captures the earliest time point (in milliseconds since the Unix epoch) + /// among the processed data packages, indicating the starting boundary of the dataset's time range. + pub min_timestamp: u64, + + /// A collection of values processed during the operation. + /// + /// Each element in this vector represents a processed value corresponding + /// to the passed data_feed item in the `Config`. 
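+    ///
+    /// The order of the values corresponds to the order of `feed_ids` in the `Config`.
+    /// A usage sketch of the `From` conversion implemented below (variable names are illustrative):
+    ///
+    /// ```ignore
+    /// // Split the result into its parts, e.g. to return them from a contract entry point.
+    /// let (min_timestamp, values): (u64, Vec<U256>) = processor_result.into();
+    /// ```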
+    pub values: Vec<U256>,
+}
+
+impl From<ProcessorResult> for (u64, Vec<U256>) {
+    fn from(result: ProcessorResult) -> Self {
+        (result.min_timestamp, result.values)
+    }
+}
diff --git a/src/core/test_helpers.rs b/src/core/test_helpers.rs
new file mode 100644
index 0000000..6938bad
--- /dev/null
+++ b/src/core/test_helpers.rs
@@ -0,0 +1,50 @@
+use crate::{
+    core::config::Config,
+    helpers::hex::{hex_to_bytes, make_feed_id, make_feed_ids},
+    protocol::{data_package::DataPackage, data_point::DataPoint},
+};
+
+pub(crate) const TEST_BLOCK_TIMESTAMP: u64 = 2000000000000;
+
+pub(crate) const TEST_SIGNER_ADDRESS_1: &str = "1ea62d73edF8ac05dfcea1a34b9796e937a29eFF";
+pub(crate) const TEST_SIGNER_ADDRESS_2: &str = "109b4a318a4f5ddcbca6349b45f881b4137deafb";
+
+pub(crate) const ETH: &str = "ETH";
+pub(crate) const BTC: &str = "BTC";
+pub(crate) const AVAX: &str = "AVAX";
+
+impl Config {
+    pub(crate) fn test() -> Self {
+        Self::test_with_feed_ids(vec!["ETH", "BTC"])
+    }
+
+    pub(crate) fn test_with_feed_ids(feed_ids: Vec<&str>) -> Self {
+        Self {
+            signer_count_threshold: 2,
+            signers: vec![
+                hex_to_bytes(TEST_SIGNER_ADDRESS_1.into()).into(),
+                hex_to_bytes(TEST_SIGNER_ADDRESS_2.into()).into(),
+            ],
+            feed_ids: make_feed_ids(feed_ids),
+            block_timestamp: TEST_BLOCK_TIMESTAMP,
+        }
+    }
+}
+
+impl DataPackage {
+    pub(crate) fn test(
+        feed_id: &str,
+        value: u128,
+        signer_address: &str,
+        timestamp: Option<u64>,
+    ) -> Self {
+        DataPackage {
+            signer_address: hex_to_bytes(signer_address.into()),
+            timestamp: timestamp.unwrap_or(TEST_BLOCK_TIMESTAMP),
+            data_points: vec![DataPoint {
+                feed_id: make_feed_id(feed_id),
+                value: value.into(),
+            }],
+        }
+    }
+}
diff --git a/src/core/validator.rs b/src/core/validator.rs
new file mode 100644
index 0000000..35ebce2
--- /dev/null
+++ b/src/core/validator.rs
@@ -0,0 +1,286 @@
+use crate::{
+    core::config::Config,
+    network::{
+        assert::Assert,
+        error::Error::{InsufficientSignerCount, TimestampTooFuture, TimestampTooOld},
+        specific::U256,
+    },
+    protocol::constants::{MAX_TIMESTAMP_AHEAD_MS, MAX_TIMESTAMP_DELAY_MS},
+    utils::filter::FilterSome,
+};
+
+/// A trait defining validation operations for data feeds and signers.
+///
+/// This trait specifies methods for validating aspects of data feeds and signers within a system that
+/// requires data integrity and authenticity checks. Implementations of this trait are responsible for
+/// defining the logic behind each validation step, ensuring that data conforms to expected rules and
+/// conditions.
+pub(crate) trait Validator {
+    /// Retrieves the index of a given data feed.
+    ///
+    /// This method takes a `feed_id` representing the unique identifier of a data feed and
+    /// returns an `Option<usize>` indicating the index of the feed within a collection of feeds.
+    /// If the feed does not exist, `None` is returned.
+    ///
+    /// # Arguments
+    ///
+    /// * `feed_id`: `U256` - The unique identifier of the data feed.
+    ///
+    /// # Returns
+    ///
+    /// * `Option<usize>` - The index of the feed if it exists, or `None` if it does not.
+    fn feed_index(&self, feed_id: U256) -> Option<usize>;
+
+    /// Retrieves the index of a given signer.
+    ///
+    /// This method accepts a signer identifier in the form of a byte slice and returns an
+    /// `Option<usize>` indicating the signer's index within a collection of signers. If the signer
+    /// is not found, `None` is returned.
+    ///
+    /// # Arguments
+    ///
+    /// * `signer`: `&[u8]` - A byte slice representing the signer's identifier.
+    ///
+    /// # Returns
+    ///
+    /// * `Option<usize>` - The index of the signer if found, or `None` if not found.
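+    ///
+    /// For the `Config` implementation below, the comparison is case-insensitive with respect to
+    /// the byte content (see `to_ascii_lowercase` in the implementation and `test_signer_index`).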
+ fn signer_index(&self, signer: &[u8]) -> Option; + + /// Validates the signer count threshold for a given index within a set of values. + /// + /// This method is responsible for ensuring that the number of valid signers meets or exceeds + /// a specified threshold necessary for a set of data values to be considered valid. It returns + /// a vector of `U256` if the values pass the validation, to be processed in other steps. + /// + /// # Arguments + /// + /// * `index`: `usize` - The index of the data value being validated. + /// * `values`: `&[Option]` - A slice of optional `U256` values associated with the data. + /// + /// # Returns + /// + /// * `Vec` - A vector of `U256` values that meet the validation criteria. + fn validate_signer_count_threshold(&self, index: usize, values: &[Option]) -> Vec; + + /// Validates the timestamp for a given index. + /// + /// This method checks whether a timestamp associated with a data value at a given index + /// meets specific conditions (e.g., being within an acceptable time range). It returns + /// the validated timestamp if it's valid, to be processed in other steps. + /// + /// # Arguments + /// + /// * `index`: `usize` - The index of the data value whose timestamp is being validated. + /// * `timestamp`: `u64` - The timestamp to be validated. + /// + /// # Returns + /// + /// * `u64` - The validated timestamp. + fn validate_timestamp(&self, index: usize, timestamp: u64) -> u64; +} + +impl Validator for Config { + #[inline] + fn feed_index(&self, feed_id: U256) -> Option { + self.feed_ids.iter().position(|&elt| elt == feed_id) + } + + #[inline] + fn signer_index(&self, signer: &[u8]) -> Option { + self.signers + .iter() + .position(|elt| elt.to_ascii_lowercase() == signer.to_ascii_lowercase()) + } + + #[inline] + fn validate_signer_count_threshold(&self, index: usize, values: &[Option]) -> Vec { + values.filter_some().assert_or_revert( + |x| (*x).len() >= self.signer_count_threshold.into(), + #[allow(clippy::useless_conversion)] + |val| InsufficientSignerCount(index, val.len(), self.feed_ids[index]), + ) + } + + #[inline] + fn validate_timestamp(&self, index: usize, timestamp: u64) -> u64 { + timestamp.assert_or_revert( + |&x| x + MAX_TIMESTAMP_DELAY_MS >= self.block_timestamp, + |timestamp| TimestampTooOld(index, *timestamp), + ); + + timestamp.assert_or_revert( + |&x| x <= self.block_timestamp + MAX_TIMESTAMP_AHEAD_MS, + |timestamp| TimestampTooFuture(index, *timestamp), + ) + } +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{ + core::{ + config::Config, + test_helpers::{ + AVAX, BTC, ETH, TEST_BLOCK_TIMESTAMP, TEST_SIGNER_ADDRESS_1, TEST_SIGNER_ADDRESS_2, + }, + validator::Validator, + }, + helpers::{ + hex::{hex_to_bytes, make_feed_id}, + iter_into::{IterInto, IterIntoOpt, OptIterIntoOpt}, + }, + network::specific::U256, + protocol::constants::{MAX_TIMESTAMP_AHEAD_MS, MAX_TIMESTAMP_DELAY_MS}, + }; + use itertools::Itertools; + + #[test] + fn test_feed_index() { + let config = Config::test(); + + let eth_index = config.feed_index(make_feed_id(ETH)); + assert_eq!(eth_index, 0.into()); + + let eth_index = config.feed_index(make_feed_id("778680")); //eth + assert_eq!(eth_index, None); + + let btc_index = config.feed_index(make_feed_id(BTC)); + assert_eq!(btc_index, 1.into()); + + let avax_index = config.feed_index(make_feed_id(AVAX)); + assert_eq!(avax_index, None); + } + + #[test] + fn test_signer_index() { + let config = Config::test(); + let index = config.signer_index(&hex_to_bytes(TEST_SIGNER_ADDRESS_1.into())); 
+ assert_eq!(index, 0.into()); + + let index = config.signer_index(&hex_to_bytes(TEST_SIGNER_ADDRESS_1.to_uppercase())); + assert_eq!(index, 0.into()); + + let index = config.signer_index(&hex_to_bytes(TEST_SIGNER_ADDRESS_2.into())); + assert_eq!(index, 1.into()); + + let index = config.signer_index(&hex_to_bytes(TEST_SIGNER_ADDRESS_2.replace('0', "1"))); + assert_eq!(index, None); + } + + #[test] + fn test_validate_timestamp() { + let config = Config::test(); + + config.validate_timestamp(0, TEST_BLOCK_TIMESTAMP); + config.validate_timestamp(1, TEST_BLOCK_TIMESTAMP + 60000); + config.validate_timestamp(2, TEST_BLOCK_TIMESTAMP + MAX_TIMESTAMP_AHEAD_MS); + config.validate_timestamp(3, TEST_BLOCK_TIMESTAMP - MAX_TIMESTAMP_DELAY_MS); + config.validate_timestamp(4, TEST_BLOCK_TIMESTAMP - 60000); + } + + #[should_panic(expected = "Timestamp 2000000180001 is too future for #0")] + #[test] + fn test_validate_timestamp_too_future() { + Config::test().validate_timestamp(0, TEST_BLOCK_TIMESTAMP + MAX_TIMESTAMP_AHEAD_MS + 1); + } + + #[should_panic(expected = "Timestamp 1999999099999 is too old for #1")] + #[test] + fn test_validate_timestamp_too_old() { + Config::test().validate_timestamp(1, TEST_BLOCK_TIMESTAMP - MAX_TIMESTAMP_DELAY_MS - 1); + } + + #[should_panic(expected = "Timestamp 0 is too old for #2")] + #[test] + fn test_validate_timestamp_zero() { + Config::test().validate_timestamp(2, 0); + } + + #[should_panic(expected = "Timestamp 4000000000000 is too future for #3")] + #[test] + fn test_validate_timestamp_big() { + Config::test().validate_timestamp(3, TEST_BLOCK_TIMESTAMP + TEST_BLOCK_TIMESTAMP); + } + + #[should_panic(expected = "Timestamp 2000000000000 is too future for #4")] + #[test] + fn test_validate_timestamp_no_block_timestamp() { + let mut config = Config::test(); + + config.block_timestamp = 0; + config.validate_timestamp(4, TEST_BLOCK_TIMESTAMP); + } + + #[should_panic(expected = "Insufficient signer count 0 for #0 (ETH)")] + #[test] + fn test_validate_signer_count_threshold_empty_list() { + Config::test().validate_signer_count_threshold(0, vec![].as_slice()); + } + + #[should_panic(expected = "Insufficient signer count 1 for #1 (BTC)")] + #[test] + fn test_validate_signer_count_threshold_shorter_list() { + Config::test().validate_signer_count_threshold(1, vec![1u8].iter_into_opt().as_slice()); + } + + #[should_panic(expected = "Insufficient signer count 1 for #1 (BTC)")] + #[test] + fn test_validate_signer_count_threshold_list_with_nones() { + Config::test().validate_signer_count_threshold( + 1, + vec![None, 1u8.into(), None].opt_iter_into_opt().as_slice(), + ); + } + + #[test] + fn test_validate_signer_count_threshold_with_exact_size() { + validate_with_all_permutations(vec![1u8, 2].iter_into_opt(), vec![1u8, 2].iter_into()); + } + + #[test] + fn test_validate_signer_count_threshold_with_exact_signer_count() { + validate_with_all_permutations( + vec![None, 1u8.into(), None, 2.into()].opt_iter_into_opt(), + vec![1u8, 2].iter_into(), + ); + } + + #[test] + fn test_validate_signer_count_threshold_with_larger_size() { + validate_with_all_permutations( + vec![ + 1u8.into(), + None, + None, + 2.into(), + 3.into(), + None, + 4.into(), + None, + ] + .opt_iter_into_opt(), + vec![1u8, 2, 3, 4].iter_into(), + ); + } + + fn validate_with_all_permutations(numbers: Vec>, expected_value: Vec) { + let perms: Vec> = numbers.iter().permutations(numbers.len()).collect(); + let mut config = Config::test(); + + let result = config.validate_signer_count_threshold(0, &numbers); + 
assert_eq!(result, expected_value); + + for threshold in 0..expected_value.len() + 1 { + config.signer_count_threshold = threshold as u8; + + for (index, perm) in perms.iter().enumerate() { + let p: Vec<_> = perm.iter().map(|&&v| v).collect(); + + let result = + config.validate_signer_count_threshold(index % config.feed_ids.len(), &p); + assert_eq!(result.len(), expected_value.len()); + } + } + } +} diff --git a/src/crypto/keccak256.rs b/src/crypto/keccak256.rs new file mode 100644 index 0000000..315006f --- /dev/null +++ b/src/crypto/keccak256.rs @@ -0,0 +1,36 @@ +use sha3::{Digest, Keccak256}; + +pub fn keccak256(data: &[u8]) -> Box<[u8]> { + Keccak256::new_with_prefix(data) + .finalize() + .as_slice() + .into() +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{crypto::keccak256::keccak256, helpers::hex::hex_to_bytes}; + + const MESSAGE: &str = "415641580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d394303d018d79bf0ba000000020000001"; + const MESSAGE_HASH: &str = "f0805644755393876d0e917e553f0c206f8bc68b7ebfe73a79d2a9e7f5a4cea6"; + const EMPTY_MESSAGE_HASH: &str = + "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"; + + #[test] + fn test_keccak256() { + let hash = keccak256(hex_to_bytes(MESSAGE.into()).as_slice()); + + assert_eq!(hash.as_ref(), hex_to_bytes(MESSAGE_HASH.into()).as_slice()); + } + + #[test] + fn test_keccak256_empty() { + let hash = keccak256(vec![].as_slice()); + + assert_eq!( + hash.as_ref(), + hex_to_bytes(EMPTY_MESSAGE_HASH.into()).as_slice() + ); + } +} diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs new file mode 100644 index 0000000..1bb66ff --- /dev/null +++ b/src/crypto/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod recover; + +mod keccak256; diff --git a/src/crypto/recover.rs b/src/crypto/recover.rs new file mode 100644 index 0000000..70c7ba0 --- /dev/null +++ b/src/crypto/recover.rs @@ -0,0 +1,133 @@ +use crate::crypto::{keccak256, recover::crypto256::recover_public_key}; + +pub fn recover_address(message: Vec, signature: Vec) -> Vec { + let recovery_byte = signature[64]; // 65-byte representation + let msg_hash = keccak256::keccak256(message.as_slice()); + let key = recover_public_key( + msg_hash, + &signature[..64], + recovery_byte - (if recovery_byte >= 27 { 27 } else { 0 }), + ); + let key_hash = keccak256::keccak256(&key[1..]); // skip first uncompressed-key byte + + key_hash[12..].into() // last 20 bytes +} + +#[cfg(feature = "crypto_secp256k1")] +pub mod crypto256 { + use crate::network::{assert::Unwrap, error::Error}; + use secp256k1::{ecdsa::RecoverableSignature, Message, Secp256k1 as Secp256k1Curve}; + + pub(crate) fn recover_public_key( + message_hash: Box<[u8]>, + signature_bytes: &[u8], + recovery_byte: u8, + ) -> Box<[u8]> { + let msg = Message::from_digest_slice(message_hash.as_ref()) + .unwrap_or_revert(|_| Error::CryptographicError(message_hash.len())); + + let recovery_id = secp256k1::ecdsa::RecoveryId::from_i32(recovery_byte.into()) + .unwrap_or_revert(|_| Error::CryptographicError(recovery_byte.into())); + + let sig: RecoverableSignature = + RecoverableSignature::from_compact(signature_bytes, recovery_id) + .unwrap_or_revert(|_| Error::CryptographicError(signature_bytes.len())); + + let public_key = Secp256k1Curve::new().recover_ecdsa(&msg, &sig); + + public_key.unwrap().serialize_uncompressed().into() + } +} + +#[cfg(feature = "crypto_k256")] +pub mod crypto256 { + use crate::network::{assert::Unwrap, error::Error}; + use 
k256::ecdsa::{RecoveryId, Signature, VerifyingKey}; + + pub fn recover_public_key( + message_hash: Box<[u8]>, + signature_bytes: &[u8], + recovery_byte: u8, + ) -> Box<[u8]> { + let recovery_id = RecoveryId::from_byte(recovery_byte) + .unwrap_or_revert(|_| Error::CryptographicError(recovery_byte.into())); + + let signature = Signature::try_from(signature_bytes) + .unwrap_or_revert(|_| Error::CryptographicError(signature_bytes.len())); + + let recovered_key = + VerifyingKey::recover_from_prehash(message_hash.as_ref(), &signature, recovery_id) + .map(|key| key.to_encoded_point(false).to_bytes()); + + recovered_key.unwrap() + } +} + +#[cfg(all(not(feature = "crypto_k256"), not(feature = "crypto_secp256k1")))] +pub mod crypto256 { + pub fn recover_public_key( + _message_hash: Box<[u8]>, + _signature_bytes: &[u8], + _recovery_byte: u8, + ) -> Box<[u8]> { + panic!("Not implemented!") + } +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{ + crypto::recover::{crypto256::recover_public_key, recover_address}, + helpers::hex::hex_to_bytes, + }; + + const MESSAGE: &str = "415641580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d394303d018d79bf0ba000000020000001"; + const MESSAGE_HASH: &str = "f0805644755393876d0e917e553f0c206f8bc68b7ebfe73a79d2a9e7f5a4cea6"; + const SIG_V27: &str = "475195641dae43318e194c3d9e5fc308773d6fdf5e197e02644dfd9ca3d19e3e2bd7d8656428f7f02e658a16b8f83722169c57126cc50bec8fad188b1bac6d19"; + const SIG_V28: &str = "c88242d22d88252c845b946c9957dbf3c7d59a3b69ecba2898198869f9f146ff268c3e47a11dbb05cc5198aadd659881817a59ee37e088d3253f4695927428c1"; + const PUBLIC_KEY_V27: &str = + "04f5f035588502146774d0ccfd62ee5bf1d7f1dbb96aae33a79765c636b8ec75a36f5121931b5cc37215a7d4280c5700ca92daaaf93c32b06ca9f98b1f4ece624e"; + const PUBLIC_KEY_V28: &str = + "04626f2ad2cfb0b41a24276d78de8959bcf45fc5e80804416e660aab2089d15e98206526e639ee19d17c8f9ae0ce3a6ff1a8ea4ab773d0fb4214e08aad7ba978c8"; + const ADDRESS_V27: &str = "2c59617248994D12816EE1Fa77CE0a64eEB456BF"; + const ADDRESS_V28: &str = "12470f7aBA85c8b81D63137DD5925D6EE114952b"; + + #[test] + fn test_recover_public_key_v27() { + let public_key = recover_public_key(u8_box(MESSAGE_HASH), &u8_box(SIG_V27), 0); + + assert_eq!(u8_box(PUBLIC_KEY_V27), public_key); + } + + #[test] + fn test_recover_public_key_v28() { + let public_key = recover_public_key(u8_box(MESSAGE_HASH), &u8_box(SIG_V28), 1); + + assert_eq!(u8_box(PUBLIC_KEY_V28), public_key); + } + + #[test] + fn test_recover_address_1b() { + let address = recover_address( + hex_to_bytes(MESSAGE.into()), + hex_to_bytes(SIG_V27.to_owned() + "1b"), + ); + + assert_eq!(hex_to_bytes(ADDRESS_V27.into()), address); + } + + #[test] + fn test_recover_address_1c() { + let address = recover_address( + hex_to_bytes(MESSAGE.into()), + hex_to_bytes(SIG_V28.to_owned() + "1c"), + ); + + assert_eq!(hex_to_bytes(ADDRESS_V28.into()), address); + } + + fn u8_box(str: &str) -> Box<[u8]> { + hex_to_bytes(str.into()).as_slice().into() + } +} diff --git a/src/helpers/hex.rs b/src/helpers/hex.rs new file mode 100644 index 0000000..055eff2 --- /dev/null +++ b/src/helpers/hex.rs @@ -0,0 +1,40 @@ +use crate::network::specific::{Bytes, FromBytesRepr, U256}; +use hex::{decode, encode}; +use std::{fs::File, io::Read}; + +pub fn hex_to_bytes(hex_str: String) -> Vec { + let trimmed_hex = hex_str.trim_start_matches("0x"); + + decode(trimmed_hex).expect("Conversion error") +} + +pub fn hex_from>(bytes: T) -> String { + encode(bytes) +} + +pub 
fn make_bytes(vec: Vec<&str>, fun: fn(&str) -> String) -> Vec { + vec.iter() + .map(|addr| hex_to_bytes(fun(addr)).into()) + .collect() +} + +pub fn make_feed_id(s: &str) -> U256 { + U256::from_bytes_repr(hex_to_bytes(encode(s))) +} + +pub fn make_feed_ids(vec: Vec<&str>) -> Vec { + vec.iter().map(|&s| make_feed_id(s)).collect() +} + +pub fn read_payload_hex(path: &str) -> String { + let mut file = File::open(path).unwrap(); + let mut contents = String::new(); + file.read_to_string(&mut contents).expect("Read error"); + contents +} + +pub fn read_payload_bytes(path: &str) -> Vec { + let contents = read_payload_hex(path); + + hex_to_bytes(contents) +} diff --git a/src/helpers/iter_into.rs b/src/helpers/iter_into.rs new file mode 100644 index 0000000..3bd73e8 --- /dev/null +++ b/src/helpers/iter_into.rs @@ -0,0 +1,75 @@ +pub trait IterInto { + fn iter_into(&self) -> U; +} + +impl> IterInto> for Vec { + fn iter_into(&self) -> Vec { + self.iter().map(|&value| value.into()).collect() + } +} + +pub trait OptIterIntoOpt { + fn opt_iter_into_opt(&self) -> U; +} + +impl> OptIterIntoOpt>> for Vec> { + fn opt_iter_into_opt(&self) -> Vec> { + self.iter().map(|&value| value.map(|v| v.into())).collect() + } +} + +pub trait IterIntoOpt { + fn iter_into_opt(&self) -> U; +} + +impl> IterIntoOpt>> for Vec { + fn iter_into_opt(&self) -> Vec> { + self.iter_into().iter_into() + } +} + +#[cfg(test)] +mod iter_into_tests { + use crate::{ + helpers::iter_into::{IterInto, IterIntoOpt, OptIterIntoOpt}, + network::specific::U256, + }; + + #[test] + fn test_iter_into() { + let values = vec![23u128, 12, 12, 23]; + + assert_eq!( + values.iter_into() as Vec, + vec![23u8.into(), 12u8.into(), 12u8.into(), 23u8.into()] + ) + } + + #[test] + fn test_iter_into_opt() { + let values: Vec = vec![23u8, 12, 12, 23]; + + assert_eq!( + values.iter_into_opt(), + vec![Some(23u8), 12u8.into(), 12u8.into(), 23u8.into()] + ) + } + + #[test] + fn test_opt_iter_into_opt() { + let values: Vec> = + vec![Some(23u128), 12.into(), 12.into(), None, 23.into(), None]; + + assert_eq!( + values.opt_iter_into_opt() as Vec>, + vec![ + Some(U256::from(23u8)), + U256::from(12u8).into(), + U256::from(12u8).into(), + None, + U256::from(23u8).into(), + None + ] + ) + } +} diff --git a/src/helpers/mod.rs b/src/helpers/mod.rs new file mode 100644 index 0000000..1cbe85c --- /dev/null +++ b/src/helpers/mod.rs @@ -0,0 +1,2 @@ +pub mod hex; +pub mod iter_into; diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..9cb09de --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,25 @@ +//! # RedStone +//! +//! `redstone` is a collection of utilities to make deserializing&decrypting RedStone payload. +//! It contains a pure Rust implementation and also an extension for the Casper network. +//! +//! Different crypto-mechanisms are easily injectable. +//! The current implementation contains `secp256k1`- and `k256`-based variants. 
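+//!
+//! The usual entry point is [`core::processor::process_payload`]: it recovers the signer of each
+//! data package from its signature, validates the package timestamps against the `Config`,
+//! and takes the median of the values for each requested feed id (see the sketch in its docs).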
+ +#[cfg(feature = "core")] +pub mod core; + +#[cfg(feature = "core")] +mod crypto; + +#[cfg(feature = "core")] +mod protocol; + +#[cfg(feature = "core")] +mod utils; + +#[cfg(feature = "network")] +pub mod network; + +#[cfg(feature = "helpers")] +pub mod helpers; diff --git a/src/network/as_str.rs b/src/network/as_str.rs new file mode 100644 index 0000000..5384ac9 --- /dev/null +++ b/src/network/as_str.rs @@ -0,0 +1,75 @@ +extern crate alloc; + +use crate::network::specific::U256; +use alloc::{format, string::String}; + +pub trait AsHexStr { + fn as_hex_str(&self) -> String; +} + +impl AsHexStr for &[u8] { + fn as_hex_str(&self) -> String { + self.iter().map(|byte| format!("{:02x}", byte)).collect() + } +} + +#[cfg(feature = "network_casper")] +impl AsHexStr for casper_types::bytesrepr::Bytes { + fn as_hex_str(&self) -> String { + self.as_slice().as_hex_str() + } +} + +impl AsHexStr for U256 { + fn as_hex_str(&self) -> String { + format!("{:X}", self) + } +} + +impl AsHexStr for Vec { + fn as_hex_str(&self) -> String { + self.as_slice().as_hex_str() + } +} + +impl AsHexStr for Box<[u8]> { + fn as_hex_str(&self) -> String { + self.as_ref().as_hex_str() + } +} + +pub trait AsAsciiStr { + fn as_ascii_str(&self) -> String; +} + +impl AsAsciiStr for &[u8] { + fn as_ascii_str(&self) -> String { + self.iter().map(|&code| code as char).collect() + } +} + +impl AsAsciiStr for Vec { + fn as_ascii_str(&self) -> String { + self.as_slice().as_ascii_str() + } +} + +#[cfg(feature = "network_casper")] +impl AsAsciiStr for casper_types::bytesrepr::Bytes { + fn as_ascii_str(&self) -> String { + self.as_slice().as_ascii_str() + } +} + +impl AsAsciiStr for U256 { + fn as_ascii_str(&self) -> String { + let hex_string = self.as_hex_str(); + let bytes = (0..hex_string.len()) + .step_by(2) + .map(|i| u8::from_str_radix(&hex_string[i..i + 2], 16)) + .collect::, _>>() + .unwrap(); + + bytes.as_ascii_str() + } +} diff --git a/src/network/assert.rs b/src/network/assert.rs new file mode 100644 index 0000000..6ae2d1a --- /dev/null +++ b/src/network/assert.rs @@ -0,0 +1,136 @@ +use crate::{ + network::{error::Error, specific::revert}, + print_debug, +}; +use std::fmt::Debug; + +pub trait Assert { + fn assert_or_revert Error>(self, check: F, error: E) -> Self; +} + +impl Assert for T +where + T: Debug, + F: Fn(&Self) -> bool, +{ + fn assert_or_revert Error>(self, check: F, error: E) -> Self { + assert_or_revert(self, check, error) + } +} + +#[inline] +fn assert_or_revert Error>(arg: T, check: F, error: E) -> T +where + F: Fn(&T) -> bool, + T: Debug, +{ + assert_or_revert_bool_with(check(&arg), || error(&arg)); + + arg +} + +#[inline] +fn assert_or_revert_bool_with Error>(check: bool, error: E) { + if check { + return; + } + + let error = error(); + print_debug!("REVERT({}) - {}!", &error.code(), error); + revert(error); +} + +pub trait Unwrap { + type ErrorArg; + + fn unwrap_or_revert Error>(self, error: E) -> R; +} + +impl Unwrap for Option +where + T: Debug, +{ + type ErrorArg = (); + + fn unwrap_or_revert Error>(self, error: E) -> T { + assert_or_revert(self, |arg| arg.is_some(), |_| error(&())).unwrap() + } +} + +impl Unwrap for Result +where + T: Debug, + Err: Debug, +{ + type ErrorArg = Err; + + fn unwrap_or_revert Error>(self, error: E) -> T { + assert_or_revert( + self, + |arg| arg.is_ok(), + |e| error(e.as_ref().err().unwrap()), + ) + .unwrap() + } +} + +#[cfg(test)] +mod assert_or_revert_tests { + use crate::network::{ + assert::{assert_or_revert_bool_with, Assert}, + error::Error, + }; + + #[test] + fn 
test_assert_or_revert_bool_with_true() { + assert_or_revert_bool_with(true, || Error::ArrayIsEmpty); + } + + #[should_panic(expected = "Array is empty")] + #[test] + fn test_assert_or_revert_bool_with_false() { + assert_or_revert_bool_with(false, || Error::ArrayIsEmpty); + } + + #[test] + fn test_assert_or_revert_correct() { + 5.assert_or_revert(|&x| x == 5, |&size| Error::SizeNotSupported(size)); + } + + #[should_panic(expected = "Size not supported: 5")] + #[test] + fn test_assert_or_revert_wrong() { + 5.assert_or_revert(|&x| x < 5, |&size| Error::SizeNotSupported(size)); + } +} + +#[cfg(test)] +mod unwrap_or_revert_tests { + use crate::network::{assert::Unwrap, error::Error}; + + #[test] + fn test_unwrap_or_revert_some() { + let result = Some(543).unwrap_or_revert(|_| Error::CryptographicError(333)); + + assert_eq!(result, 543); + } + + #[should_panic(expected = "Cryptographic Error: 333")] + #[test] + fn test_unwrap_or_revert_none() { + (Option::::None).unwrap_or_revert(|_| Error::CryptographicError(333)); + } + + #[test] + fn test_unwrap_or_revert_ok() { + let result = Ok(256).unwrap_or_revert(|_: &Error| Error::CryptographicError(333)); + + assert_eq!(result, 256); + } + + #[should_panic(expected = "Cryptographic Error: 567")] + #[test] + fn test_unwrap_or_revert_err() { + Result::<&[u8], Error>::Err(Error::CryptographicError(567)).unwrap_or_revert(|e| e.clone()); + } +} diff --git a/src/network/casper/error.rs b/src/network/casper/error.rs new file mode 100644 index 0000000..de53ea6 --- /dev/null +++ b/src/network/casper/error.rs @@ -0,0 +1,8 @@ +use crate::network::error::Error; +use casper_types::ApiError; + +impl From for ApiError { + fn from(error: Error) -> Self { + ApiError::User(error.code()) + } +} diff --git a/src/network/casper/from_bytes_repr.rs b/src/network/casper/from_bytes_repr.rs new file mode 100644 index 0000000..64ec573 --- /dev/null +++ b/src/network/casper/from_bytes_repr.rs @@ -0,0 +1,7 @@ +use crate::network::specific::{FromBytesRepr, U256}; + +impl FromBytesRepr> for U256 { + fn from_bytes_repr(bytes: Vec) -> Self { + bytes.as_slice().into() + } +} diff --git a/src/network/casper/mod.rs b/src/network/casper/mod.rs new file mode 100644 index 0000000..48f16ea --- /dev/null +++ b/src/network/casper/mod.rs @@ -0,0 +1,38 @@ +use crate::network::{error::Error, specific::NetworkSpecific}; + +mod error; +mod from_bytes_repr; + +pub struct Casper; + +impl NetworkSpecific for Casper { + type BytesRepr = casper_types::bytesrepr::Bytes; + type ValueRepr = casper_types::U256; + type _Self = Self; + + const VALUE_SIZE: usize = 32; + + fn print(_text: String) { + #[cfg(all(not(test), feature = "print_debug"))] + { + casper_contract::contract_api::runtime::print(&_text); + } + + #[cfg(test)] + { + println!("{}", _text); + } + } + + fn revert(error: Error) -> ! { + #[cfg(not(test))] + { + casper_contract::contract_api::runtime::revert(error) + } + + #[cfg(test)] + { + panic!("{}", error) + } + } +} diff --git a/src/network/error.rs b/src/network/error.rs new file mode 100644 index 0000000..ac827df --- /dev/null +++ b/src/network/error.rs @@ -0,0 +1,175 @@ +use crate::network::{ + as_str::{AsAsciiStr, AsHexStr}, + specific::U256, +}; +use std::fmt::{Debug, Display, Formatter}; + +pub trait ContractErrorContent: Debug { + fn code(&self) -> u8; + fn message(&self) -> String; +} + +/// Errors that can be encountered in the deserializing&decrypting the RedStone payload or just contract execution process. 
+/// +/// These errors include issues with contract logic, data types, +/// cryptographic operations, and conditions specific to the requirements. +#[derive(Debug)] +pub enum Error { + /// Represents errors that arise from the contract itself. + /// + /// This variant is used for encapsulating errors that are specific to the contract's logic + /// or execution conditions that aren't covered by more specific error types. + ContractError(Box), + + /// Indicates an overflow error with `U256` numbers. + /// + /// Used when operations on `U256` numbers exceed their maximum value, potentially leading + /// to incorrect calculations or state. + NumberOverflow(U256), + + /// Used when an expected non-empty array or vector is found to be empty. + /// + /// This could occur in scenarios where the contract logic requires a non-empty collection + /// of items for the correct operation, for example, during aggregating the values. + ArrayIsEmpty, + + /// Represents errors related to cryptographic operations. + /// + /// This includes failures in signature verification, hashing, or other cryptographic + /// processes, with the usize indicating the position or identifier of the failed operation. + CryptographicError(usize), + + /// Signifies that an unsupported size was encountered. + /// + /// This could be used when a data structure or input does not meet the expected size + /// requirements for processing. + SizeNotSupported(usize), + + /// Indicates that the marker bytes for RedStone are incorrect. + /// + /// This error is specific to scenarios where marker or identifier bytes do not match + /// expected values, potentially indicating corrupted or tampered data. + WrongRedStoneMarker(Vec), + + /// Used when there is leftover data in a payload that should have been empty. + /// + /// This could indicate an error in data parsing or that additional, unexpected data + /// was included in a message or transaction. + NonEmptyPayloadRemainder(Vec), + + /// Indicates that the number of signers does not meet the required threshold. + /// + /// This variant includes the current number of signers, the required threshold, and + /// potentially a feed_id related to the operation that failed due to insufficient signers. + InsufficientSignerCount(usize, usize, U256), + + /// Used when a timestamp is older than allowed by the processor logic. + /// + /// Includes the position or identifier of the timestamp and the threshold value, + /// indicating that the provided timestamp is too far in the past. + TimestampTooOld(usize, u64), + + /// Indicates that a timestamp is further in the future than allowed. + /// + /// Similar to `TimestampTooOld`, but for future timestamps exceeding the contract's + /// acceptance window. + TimestampTooFuture(usize, u64), + + /// Represents errors that need to clone `ContractErrorContent`, which is not supported by default. + /// + /// This variant allows for the manual duplication of contract error information, including + /// an error code and a descriptive message. 
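+    ///
+    /// The `Clone` implementation below maps a `ContractError` into this variant, preserving
+    /// the inner error's `code()` and `message()`, so the boxed content itself does not need
+    /// to be cloneable.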
+ ClonedContractError(u8, String), +} + +impl Error { + pub fn contract_error(value: T) -> Error { + Error::ContractError(Box::new(value)) + } + + pub(crate) fn code(&self) -> u16 { + match self { + Error::ContractError(boxed) => boxed.code() as u16, + Error::NumberOverflow(_) => 509, + Error::ArrayIsEmpty => 510, + Error::WrongRedStoneMarker(_) => 511, + Error::NonEmptyPayloadRemainder(_) => 512, + Error::InsufficientSignerCount(data_package_index, value, _) => { + (2000 + data_package_index * 10 + value) as u16 + } + Error::SizeNotSupported(size) => 600 + *size as u16, + Error::CryptographicError(size) => 700 + *size as u16, + Error::TimestampTooOld(data_package_index, _) => 1000 + *data_package_index as u16, + Error::TimestampTooFuture(data_package_index, _) => 1050 + *data_package_index as u16, + Error::ClonedContractError(code, _) => *code as u16, + } + } +} + +impl Display for Error { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Error::ContractError(boxed) => write!(f, "Contract error: {}", boxed.message()), + Error::NumberOverflow(number) => write!(f, "Number overflow: {}", number), + Error::ArrayIsEmpty => write!(f, "Array is empty"), + Error::CryptographicError(size) => write!(f, "Cryptographic Error: {}", size), + Error::SizeNotSupported(size) => write!(f, "Size not supported: {}", size), + Error::WrongRedStoneMarker(bytes) => { + write!(f, "Wrong RedStone marker: {}", bytes.as_hex_str()) + } + Error::NonEmptyPayloadRemainder(bytes) => { + write!(f, "Non empty payload remainder: {}", bytes.as_hex_str()) + } + Error::InsufficientSignerCount(data_package_index, value, feed_id) => write!( + f, + "Insufficient signer count {} for #{} ({})", + value, + data_package_index, + feed_id.as_ascii_str() + ), + Error::TimestampTooOld(data_package_index, value) => { + write!( + f, + "Timestamp {} is too old for #{}", + value, data_package_index + ) + } + Error::TimestampTooFuture(data_package_index, value) => write!( + f, + "Timestamp {} is too future for #{}", + value, data_package_index + ), + Error::ClonedContractError(_, message) => { + write!(f, "(Cloned) Contract error: {}", message) + } + } + } +} + +impl Clone for Error { + fn clone(&self) -> Self { + match self { + Error::ContractError(content) => { + Error::ClonedContractError(content.code(), content.message()) + } + Error::NumberOverflow(value) => Error::NumberOverflow(*value), + Error::ArrayIsEmpty => Error::ArrayIsEmpty, + Error::CryptographicError(size) => Error::CryptographicError(*size), + Error::SizeNotSupported(size) => Error::SizeNotSupported(*size), + Error::WrongRedStoneMarker(bytes) => Error::WrongRedStoneMarker(bytes.clone()), + Error::NonEmptyPayloadRemainder(bytes) => { + Error::NonEmptyPayloadRemainder(bytes.clone()) + } + Error::InsufficientSignerCount(count, needed, bytes) => { + Error::InsufficientSignerCount(*count, *needed, *bytes) + } + Error::TimestampTooOld(index, timestamp) => Error::TimestampTooOld(*index, *timestamp), + Error::TimestampTooFuture(index, timestamp) => { + Error::TimestampTooFuture(*index, *timestamp) + } + Error::ClonedContractError(code, message) => { + Error::ClonedContractError(*code, message.as_str().into()) + } + } + } +} diff --git a/src/network/flattened.rs b/src/network/flattened.rs new file mode 100644 index 0000000..a639985 --- /dev/null +++ b/src/network/flattened.rs @@ -0,0 +1,35 @@ +use crate::network::specific::Bytes; + +pub trait Flattened { + fn flattened(&self) -> T; +} + +impl Flattened for Vec { + fn flattened(&self) -> Bytes { + 
#[allow(clippy::useless_conversion)] + self.iter().flatten().copied().collect::>().into() + } +} + +#[cfg(test)] +mod tests { + use crate::network::{flattened::Flattened, specific::Bytes}; + + #[test] + fn test_bytes_flattened() { + #[allow(clippy::useless_conversion)] + let bytes: Vec = vec![ + vec![1u8, 2, 3].into(), + vec![4u8].into(), + vec![].into(), + vec![5, 6, 7].into(), + ]; + + let result: Bytes = bytes.flattened(); + + #[allow(clippy::useless_conversion)] + let expected_result: Bytes = vec![1u8, 2, 3, 4, 5, 6, 7].into(); + + assert_eq!(result, expected_result); + } +} diff --git a/src/network/mod.rs b/src/network/mod.rs new file mode 100644 index 0000000..f963421 --- /dev/null +++ b/src/network/mod.rs @@ -0,0 +1,18 @@ +pub mod as_str; +pub mod assert; +pub mod error; +pub mod print_debug; +pub mod specific; + +#[cfg(feature = "network_casper")] +pub mod casper; + +#[cfg(feature = "network_casper")] +pub type _Network = casper::Casper; + +pub mod flattened; +#[cfg(not(feature = "network_casper"))] +mod std; + +#[cfg(not(feature = "network_casper"))] +pub type _Network = std::Std; diff --git a/src/network/print_debug.rs b/src/network/print_debug.rs new file mode 100644 index 0000000..9190d2c --- /dev/null +++ b/src/network/print_debug.rs @@ -0,0 +1,23 @@ +extern crate alloc; + +#[macro_export] +macro_rules! print_debug { + ($fmt:expr) => { + $crate::network::specific::print(format!($fmt)) + }; + ($fmt:expr, $($args:tt)*) => { + $crate::network::specific::print(format!($fmt, $($args)*)) + }; +} + +#[macro_export] +macro_rules! print_and_panic { + ($fmt:expr) => {{ + $crate::print_debug!($fmt); + panic!($fmt) + }}; + ($fmt:expr, $($args:tt)*) => {{ + $crate::print_debug!($fmt, $($args)*); + panic!($fmt, $($args)*) + }}; +} diff --git a/src/network/specific.rs b/src/network/specific.rs new file mode 100644 index 0000000..839af0b --- /dev/null +++ b/src/network/specific.rs @@ -0,0 +1,33 @@ +use crate::network::{_Network, error::Error}; + +pub trait FromBytesRepr { + fn from_bytes_repr(bytes: T) -> Self; +} + +pub trait NetworkSpecific { + type BytesRepr: From> + Into>; + type ValueRepr: FromBytesRepr>; + type _Self; + + const VALUE_SIZE: usize; + + fn print(_text: String); + fn revert(error: Error) -> !; +} + +pub type Bytes = <_Network as NetworkSpecific>::BytesRepr; +pub type U256 = <_Network as NetworkSpecific>::ValueRepr; + +pub(crate) type Network = <_Network as NetworkSpecific>::_Self; + +#[cfg(test)] +#[allow(dead_code)] +pub(crate) const VALUE_SIZE: usize = Network::VALUE_SIZE; + +pub fn print(_text: String) { + Network::print(_text) +} + +pub fn revert(error: Error) -> ! 
{ + Network::revert(error) +} diff --git a/src/network/std/from_bytes_repr.rs b/src/network/std/from_bytes_repr.rs new file mode 100644 index 0000000..3e4ff4b --- /dev/null +++ b/src/network/std/from_bytes_repr.rs @@ -0,0 +1,13 @@ +use crate::network::specific::FromBytesRepr; + +impl FromBytesRepr> for u128 { + fn from_bytes_repr(bytes: Vec) -> u128 { + let bytes = bytes[(bytes.len().max(16) - 16)..].to_vec(); + + let mut result: u128 = 0; + for &byte in bytes.iter() { + result = (result << 8) | byte as u128; + } + result + } +} diff --git a/src/network/std/mod.rs b/src/network/std/mod.rs new file mode 100644 index 0000000..b921705 --- /dev/null +++ b/src/network/std/mod.rs @@ -0,0 +1,22 @@ +use crate::network::{error::Error, specific::NetworkSpecific}; +use std::eprintln; + +mod from_bytes_repr; + +pub struct Std; + +impl NetworkSpecific for Std { + type BytesRepr = Vec; + type ValueRepr = u128; + type _Self = Std; + + const VALUE_SIZE: usize = 16; + + fn print(text: String) { + eprintln!("{}", text) + } + + fn revert(error: Error) -> ! { + panic!("{}", error) + } +} diff --git a/src/protocol/constants.rs b/src/protocol/constants.rs new file mode 100644 index 0000000..fb96438 --- /dev/null +++ b/src/protocol/constants.rs @@ -0,0 +1,11 @@ +pub(crate) const UNSIGNED_METADATA_BYTE_SIZE_BS: usize = 3; +pub(crate) const DATA_PACKAGES_COUNT_BS: usize = 2; +pub(crate) const DATA_POINTS_COUNT_BS: usize = 3; +pub(crate) const SIGNATURE_BS: usize = 65; +pub(crate) const DATA_POINT_VALUE_BYTE_SIZE_BS: usize = 4; +pub(crate) const DATA_FEED_ID_BS: usize = 32; +pub(crate) const TIMESTAMP_BS: usize = 6; +pub(crate) const MAX_TIMESTAMP_DELAY_MS: u64 = 15 * 60 * 1000; // 15 minutes in milliseconds +pub(crate) const MAX_TIMESTAMP_AHEAD_MS: u64 = 3 * 60 * 1000; // 3 minutes in milliseconds +pub(crate) const REDSTONE_MARKER_BS: usize = 9; +pub(crate) const REDSTONE_MARKER: [u8; 9] = [0, 0, 2, 237, 87, 1, 30, 0, 0]; // 0x000002ed57011e0000 diff --git a/src/protocol/data_package.rs b/src/protocol/data_package.rs new file mode 100644 index 0000000..00bf75e --- /dev/null +++ b/src/protocol/data_package.rs @@ -0,0 +1,215 @@ +use crate::{ + crypto::recover::recover_address, + network::as_str::AsHexStr, + protocol::{ + constants::{ + DATA_FEED_ID_BS, DATA_POINTS_COUNT_BS, DATA_POINT_VALUE_BYTE_SIZE_BS, SIGNATURE_BS, + TIMESTAMP_BS, + }, + data_point::{trim_data_points, DataPoint}, + }, + utils::trim::Trim, +}; +use std::fmt::{Debug, Formatter}; + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct DataPackage { + pub(crate) signer_address: Vec, + pub(crate) timestamp: u64, + pub(crate) data_points: Vec, +} + +pub(crate) fn trim_data_packages(payload: &mut Vec, count: usize) -> Vec { + let mut data_packages = Vec::new(); + + for _ in 0..count { + let data_package = trim_data_package(payload); + data_packages.push(data_package); + } + + data_packages +} + +fn trim_data_package(payload: &mut Vec) -> DataPackage { + let signature = payload.trim_end(SIGNATURE_BS); + let mut tmp = payload.clone(); + + let data_point_count = payload.trim_end(DATA_POINTS_COUNT_BS); + let value_size = payload.trim_end(DATA_POINT_VALUE_BYTE_SIZE_BS); + let timestamp = payload.trim_end(TIMESTAMP_BS); + let size = data_point_count * (value_size + DATA_FEED_ID_BS) + + DATA_POINT_VALUE_BYTE_SIZE_BS + + TIMESTAMP_BS + + DATA_POINTS_COUNT_BS; + + let signable_bytes = tmp.trim_end(size); + let signer_address = recover_address(signable_bytes, signature); + + let data_points = trim_data_points(payload, data_point_count, value_size); + + DataPackage 
{ + data_points, + timestamp, + signer_address, + } +} + +impl Debug for DataPackage { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "DataPackage {{\n signer_address: 0x{}, timestamp: {},\n data_points: {:?}\n}}", + self.signer_address.as_hex_str(), + self.timestamp, + self.data_points + ) + } +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{ + helpers::hex::hex_to_bytes, + network::specific::{FromBytesRepr, U256}, + protocol::{ + constants::{ + DATA_FEED_ID_BS, DATA_POINTS_COUNT_BS, DATA_POINT_VALUE_BYTE_SIZE_BS, SIGNATURE_BS, + TIMESTAMP_BS, + }, + data_package::{trim_data_package, trim_data_packages, DataPackage}, + data_point::DataPoint, + }, + }; + + const DATA_PACKAGE_BYTES_1: &str = "4554480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000360cafc94e018d79bf0ba00000002000000151afa8c5c3caf6004b42c0fb17723e524f993b9ecbad3b9bce5ec74930fa436a3660e8edef10e96ee5f222de7ef5787c02ca467c0ec18daa2907b43ac20c63c11c"; + const DATA_PACKAGE_BYTES_2: &str = "4554480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000360cdd851e018d79bf0ba000000020000001473fd9dc72e6814a7de719b403cf4c9eba08934a643fd0666c433b806b31e69904f2226ffd3c8ef75861b11b5e32a1fda4b1458e0da4605a772dfba2a812f3ee1b"; + + const SIGNER_ADDRESS_1: &str = "1ea62d73edf8ac05dfcea1a34b9796e937a29eff"; + const SIGNER_ADDRESS_2: &str = "109b4a318a4f5ddcbca6349b45f881b4137deafb"; + + const VALUE_1: u128 = 232141080910; + const VALUE_2: u128 = 232144078110; + + const DATA_PACKAGE_SIZE: usize = 32 + + DATA_FEED_ID_BS + + DATA_POINT_VALUE_BYTE_SIZE_BS + + TIMESTAMP_BS + + SIGNATURE_BS + + DATA_POINTS_COUNT_BS; + + #[test] + fn test_trim_data_packages() { + test_trim_data_packages_of(2, ""); + test_trim_data_packages_of(0, ""); + test_trim_data_packages_of(1, ""); + } + + #[test] + fn test_trim_data_packages_with_prefix() { + let prefix = "da4687f1914a1c"; + + test_trim_data_packages_of(2, prefix); + } + + #[test] + fn test_trim_data_packages_single() { + let mut bytes = hex_to_bytes(DATA_PACKAGE_BYTES_1.into()); + let data_packages = trim_data_packages(&mut bytes, 1); + assert_eq!(data_packages.len(), 1); + assert_eq!(bytes, Vec::::new()); + + verify_data_package(data_packages[0].clone(), VALUE_1, SIGNER_ADDRESS_1); + } + + fn test_trim_data_packages_of(count: usize, prefix: &str) { + let input: Vec = + hex_to_bytes((prefix.to_owned() + DATA_PACKAGE_BYTES_1) + DATA_PACKAGE_BYTES_2); + let mut bytes = input.clone(); + let data_packages = trim_data_packages(&mut bytes, count); + + assert_eq!(data_packages.len(), count); + assert_eq!( + bytes.as_slice(), + &input[..input.len() - count * DATA_PACKAGE_SIZE] + ); + + let values = &[VALUE_2, VALUE_1]; + let signers = &[SIGNER_ADDRESS_2, SIGNER_ADDRESS_1]; + + for i in 0..count { + verify_data_package(data_packages[i].clone(), values[i], signers[i]); + } + } + + #[should_panic(expected = "index out of bounds")] + #[test] + fn test_trim_data_packages_bigger_number() { + test_trim_data_packages_of(3, ""); + } + + #[test] + fn test_trim_data_package() { + test_trim_data_package_of(DATA_PACKAGE_BYTES_1, VALUE_1, SIGNER_ADDRESS_1); + test_trim_data_package_of(DATA_PACKAGE_BYTES_2, VALUE_2, SIGNER_ADDRESS_2); + } + + #[test] + fn test_trim_data_package_with_prefix() { + test_trim_data_package_of( + &("da4687f1914a1c".to_owned() + DATA_PACKAGE_BYTES_1), + VALUE_1, + SIGNER_ADDRESS_1, + ); + test_trim_data_package_of( + 
&("da4687f1914a1c".to_owned() + DATA_PACKAGE_BYTES_2), + VALUE_2, + SIGNER_ADDRESS_2, + ); + } + + #[should_panic] + #[test] + fn test_trim_data_package_signature_only() { + test_trim_data_package_of( + &DATA_PACKAGE_BYTES_1[(DATA_PACKAGE_BYTES_1.len() - 2 * SIGNATURE_BS)..], + 0, + "", + ); + } + + #[should_panic] + #[test] + fn test_trim_data_package_shorter() { + test_trim_data_package_of( + &DATA_PACKAGE_BYTES_1 + [(DATA_PACKAGE_BYTES_1.len() - 2 * (SIGNATURE_BS + DATA_POINTS_COUNT_BS))..], + 0, + "", + ); + } + + fn test_trim_data_package_of(bytes_str: &str, expected_value: u128, signer_address: &str) { + let mut bytes: Vec = hex_to_bytes(bytes_str.into()); + let result = trim_data_package(&mut bytes); + assert_eq!( + bytes, + hex_to_bytes(bytes_str[..bytes_str.len() - 2 * (DATA_PACKAGE_SIZE)].into()) + ); + + verify_data_package(result, expected_value, signer_address); + } + + fn verify_data_package(result: DataPackage, expected_value: u128, signer_address: &str) { + let data_package = DataPackage { + data_points: vec![DataPoint { + feed_id: U256::from_bytes_repr(hex_to_bytes(DATA_PACKAGE_BYTES_1[..6].into())), + value: U256::from(expected_value), + }], + timestamp: 1707144580000, + signer_address: hex_to_bytes(signer_address.into()), + }; + + assert_eq!(result, data_package); + } +} diff --git a/src/protocol/data_point.rs b/src/protocol/data_point.rs new file mode 100644 index 0000000..15145f4 --- /dev/null +++ b/src/protocol/data_point.rs @@ -0,0 +1,153 @@ +use crate::{ + network::{ + as_str::{AsAsciiStr, AsHexStr}, + assert::Assert, + error::Error, + specific::{FromBytesRepr, U256}, + }, + protocol::constants::DATA_FEED_ID_BS, + utils::{trim::Trim, trim_zeros::TrimZeros}, +}; +use std::fmt::{Debug, Formatter}; + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct DataPoint { + pub(crate) feed_id: U256, + pub(crate) value: U256, +} + +pub(crate) fn trim_data_points( + payload: &mut Vec, + count: usize, + value_size: usize, +) -> Vec { + count.assert_or_revert(|&count| count == 1, |&count| Error::SizeNotSupported(count)); + + let mut data_points = Vec::new(); + + for _ in 0..count { + let data_point = trim_data_point(payload, value_size); + data_points.push(data_point); + } + + data_points +} + +fn trim_data_point(payload: &mut Vec, value_size: usize) -> DataPoint { + let value = payload.trim_end(value_size); + let feed_id: Vec = payload.trim_end(DATA_FEED_ID_BS); + + DataPoint { + value, + feed_id: U256::from_bytes_repr(feed_id.trim_zeros()), + } +} + +impl Debug for DataPoint { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "DataPoint {{\n feed_id: {:?} (0x{}), value: {}\n }}", + self.feed_id.as_ascii_str(), + self.feed_id.as_hex_str(), + self.value, + ) + } +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{ + helpers::hex::hex_to_bytes, + network::specific::{FromBytesRepr, U256, VALUE_SIZE}, + protocol::{ + constants::DATA_FEED_ID_BS, + data_point::{trim_data_point, trim_data_points, DataPoint}, + }, + }; + use std::ops::Shr; + + const DATA_POINT_BYTES_TAIL: &str = "4554480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000360cafc94e"; + const VALUE: u128 = 232141080910; + + #[test] + fn test_trim_data_points() { + let mut bytes = hex_to_bytes(DATA_POINT_BYTES_TAIL.into()); + let result = trim_data_points(&mut bytes, 1, 32); + + assert_eq!(result.len(), 1); + + verify_rest_and_result( + DATA_POINT_BYTES_TAIL, + 32, + VALUE.into(), + bytes, + result[0].clone(), + ) + 
} + + #[should_panic(expected = "Size not supported: 0")] + #[test] + fn test_trim_zero_data_points() { + trim_data_points(&mut hex_to_bytes(DATA_POINT_BYTES_TAIL.into()), 0, 32); + } + + #[should_panic(expected = "Size not supported: 2")] + #[test] + fn test_trim_two_data_points() { + trim_data_points(&mut hex_to_bytes(DATA_POINT_BYTES_TAIL.into()), 2, 32); + } + + #[test] + fn test_trim_data_point() { + test_trim_data_point_of(DATA_POINT_BYTES_TAIL, 32, VALUE.into()); + } + + #[test] + fn test_trim_data_point_with_prefix() { + test_trim_data_point_of( + &("a2a812f3ee1b".to_owned() + DATA_POINT_BYTES_TAIL), + 32, + VALUE.into(), + ); + } + + #[test] + fn test_trim_data_point_other_lengths() { + for i in 1..VALUE_SIZE { + test_trim_data_point_of( + &DATA_POINT_BYTES_TAIL[..DATA_POINT_BYTES_TAIL.len() - 2 * i], + 32 - i, + U256::from(VALUE).shr(8 * i), + ); + } + } + + fn test_trim_data_point_of(value: &str, size: usize, expected_value: U256) { + let mut bytes = hex_to_bytes(value.into()); + let result = trim_data_point(&mut bytes, size); + + verify_rest_and_result(value, size, expected_value, bytes, result); + } + + fn verify_rest_and_result( + value: &str, + size: usize, + expected_value: U256, + rest: Vec, + result: DataPoint, + ) { + assert_eq!( + rest, + hex_to_bytes(value[..value.len() - 2 * (size + DATA_FEED_ID_BS)].into()) + ); + + let data_point = DataPoint { + value: expected_value, + feed_id: U256::from_bytes_repr(hex_to_bytes(DATA_POINT_BYTES_TAIL[..6].to_string())), + }; + + assert_eq!(result, data_point); + } +} diff --git a/src/protocol/marker.rs b/src/protocol/marker.rs new file mode 100644 index 0000000..0fb2e1f --- /dev/null +++ b/src/protocol/marker.rs @@ -0,0 +1,66 @@ +use crate::{ + network::{assert::Assert, error::Error}, + protocol::constants::{REDSTONE_MARKER, REDSTONE_MARKER_BS}, + utils::trim::Trim, +}; + +pub fn trim_redstone_marker(payload: &mut Vec) { + let marker: Vec = payload.trim_end(REDSTONE_MARKER_BS); + + marker.as_slice().assert_or_revert( + |&marker| marker == REDSTONE_MARKER, + |&val| Error::WrongRedStoneMarker(val.into()), + ); +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{ + helpers::hex::hex_to_bytes, + protocol::{constants::REDSTONE_MARKER_BS, marker::trim_redstone_marker}, + }; + + const PAYLOAD_TAIL: &str = "1c000f000000000002ed57011e0000"; + + #[test] + fn test_trim_redstone_marker() { + let mut bytes = hex_to_bytes(PAYLOAD_TAIL.into()); + trim_redstone_marker(&mut bytes); + + assert_eq!( + bytes, + hex_to_bytes(PAYLOAD_TAIL[..PAYLOAD_TAIL.len() - 2 * REDSTONE_MARKER_BS].into()) + ); + } + + #[should_panic(expected = "Wrong RedStone marker: 000002ed57022e0000")] + #[test] + fn test_trim_redstone_marker_wrong() { + trim_redstone_marker(&mut hex_to_bytes(PAYLOAD_TAIL.replace('1', "2"))); + } + + #[should_panic(expected = "Wrong RedStone marker: 00000002ed57011e00")] + #[test] + fn test_trim_redstone_marker_wrong_ending() { + trim_redstone_marker(&mut hex_to_bytes( + PAYLOAD_TAIL[..PAYLOAD_TAIL.len() - 2].into(), + )); + } + + #[should_panic(expected = "Wrong RedStone marker: 100002ed57011e0000")] + #[test] + fn test_trim_redstone_marker_wrong_beginning() { + trim_redstone_marker(&mut hex_to_bytes( + PAYLOAD_TAIL.replace("0000000", "1111111"), + )); + } + + #[should_panic(expected = "Wrong RedStone marker: 0002ed57011e0000")] + #[test] + fn test_trim_redstone_marker_too_short() { + trim_redstone_marker(&mut hex_to_bytes( + PAYLOAD_TAIL[PAYLOAD_TAIL.len() - 2 * (REDSTONE_MARKER_BS - 1)..].into(), + )); + } +} diff --git 
a/src/protocol/mod.rs b/src/protocol/mod.rs new file mode 100644 index 0000000..07dfdef --- /dev/null +++ b/src/protocol/mod.rs @@ -0,0 +1,5 @@ +pub(crate) mod constants; +pub(crate) mod data_package; +pub(crate) mod data_point; +pub(crate) mod marker; +pub(crate) mod payload; diff --git a/src/protocol/payload.rs b/src/protocol/payload.rs new file mode 100644 index 0000000..9cd0212 --- /dev/null +++ b/src/protocol/payload.rs @@ -0,0 +1,104 @@ +use crate::{ + network::{assert::Assert, error::Error}, + protocol::{ + constants::{DATA_PACKAGES_COUNT_BS, UNSIGNED_METADATA_BYTE_SIZE_BS}, + data_package::{trim_data_packages, DataPackage}, + marker, + }, + utils::trim::Trim, +}; + +#[derive(Clone, Debug)] +pub(crate) struct Payload { + pub(crate) data_packages: Vec, +} + +impl Payload { + pub(crate) fn make(payload_bytes: &mut Vec) -> Payload { + marker::trim_redstone_marker(payload_bytes); + let payload = trim_payload(payload_bytes); + + payload_bytes.assert_or_revert( + |bytes| bytes.is_empty(), + |bytes| Error::NonEmptyPayloadRemainder(bytes.as_slice().into()), + ); + + payload + } +} + +fn trim_payload(payload: &mut Vec) -> Payload { + let data_package_count = trim_metadata(payload); + let data_packages = trim_data_packages(payload, data_package_count); + + Payload { data_packages } +} + +fn trim_metadata(payload: &mut Vec) -> usize { + let unsigned_metadata_size = payload.trim_end(UNSIGNED_METADATA_BYTE_SIZE_BS); + let _: Vec = payload.trim_end(unsigned_metadata_size); + + payload.trim_end(DATA_PACKAGES_COUNT_BS) +} + +#[cfg(feature = "helpers")] +#[cfg(test)] +mod tests { + use crate::{ + helpers::hex::{hex_to_bytes, read_payload_bytes, read_payload_hex}, + protocol::{ + constants::REDSTONE_MARKER_BS, + payload::{trim_metadata, trim_payload, Payload}, + }, + }; + + const PAYLOAD_METADATA_BYTES: &str = "000f000000"; + const PAYLOAD_METADATA_WITH_UNSIGNED_BYTE: &str = "000f55000001"; + const PAYLOAD_METADATA_WITH_UNSIGNED_BYTES: &str = "000f11223344556677889900aabbccddeeff000010"; + + #[test] + fn test_trim_metadata() { + let prefix = "9e0294371c"; + + for &bytes_str in &[ + PAYLOAD_METADATA_BYTES, + PAYLOAD_METADATA_WITH_UNSIGNED_BYTE, + PAYLOAD_METADATA_WITH_UNSIGNED_BYTES, + ] { + let mut bytes = hex_to_bytes(prefix.to_owned() + bytes_str); + let result = trim_metadata(&mut bytes); + + assert_eq!(bytes, hex_to_bytes(prefix.into())); + assert_eq!(result, 15); + } + } + + #[test] + fn test_trim_payload() { + let payload_hex = read_payload_bytes("./sample-data/payload.hex"); + + let mut bytes = payload_hex[..payload_hex.len() - REDSTONE_MARKER_BS].into(); + let payload = trim_payload(&mut bytes); + + assert_eq!(bytes, Vec::::new()); + assert_eq!(payload.data_packages.len(), 15); + } + + #[test] + fn test_make_payload() { + let mut payload_hex = read_payload_bytes("./sample-data/payload.hex"); + let payload = Payload::make(&mut payload_hex); + + assert_eq!(payload.data_packages.len(), 15); + } + + #[should_panic(expected = "Non empty payload remainder: 12")] + #[test] + fn test_make_payload_with_prefix() { + let payload_hex = read_payload_hex("./sample-data/payload.hex"); + let mut bytes = hex_to_bytes("12".to_owned() + &payload_hex); + let payload = Payload::make(&mut bytes); + + assert_eq!(payload.data_packages.len(), 15); + } +} diff --git a/src/utils/filter.rs b/src/utils/filter.rs new file mode 100644 index 0000000..9ee9c1a --- /dev/null +++ b/src/utils/filter.rs @@ -0,0 +1,21 @@ +pub(crate) trait FilterSome { + fn filter_some(&self) -> Output; +} + +impl FilterSome> for [Option] { + fn 
filter_some(&self) -> Vec { + self.iter().filter_map(|&opt| opt).collect() + } +} + +#[cfg(test)] +mod filter_some_tests { + use crate::utils::filter::FilterSome; + + #[test] + fn test_filter_some() { + let values = vec![None, Some(23u64), None, Some(12), Some(12), None, Some(23)]; + + assert_eq!(values.filter_some(), vec![23, 12, 12, 23]) + } +} diff --git a/src/utils/median.rs b/src/utils/median.rs new file mode 100644 index 0000000..ed10b24 --- /dev/null +++ b/src/utils/median.rs @@ -0,0 +1,161 @@ +use crate::network::{assert::Assert, error::Error::ArrayIsEmpty}; +use std::ops::{Add, Rem, Shr}; + +pub(crate) trait Median { + type Item; + + fn median(self) -> Self::Item; +} + +trait Avg { + fn avg(self, other: Self) -> Self; +} + +impl Avg for T +where + T: Add + Shr + From + Rem + Copy, +{ + fn avg(self, other: Self) -> Self { + let one = T::from(1); + let two = T::from(2); + + self.shr(one) + other.shr(one) + (self % two + other % two).shr(one) + } +} + +impl Median for Vec +where + T: Copy + Ord + Avg, +{ + type Item = T; + + fn median(self) -> Self::Item { + let len = self.len(); + + match len.assert_or_revert(|x| *x > 0, |_| ArrayIsEmpty) { + 1 => self[0], + 2 => self[0].avg(self[1]), + 3 => maybe_pick_median(self[0], self[1], self[2]).unwrap_or_else(|| { + maybe_pick_median(self[1], self[0], self[2]) + .unwrap_or_else(|| maybe_pick_median(self[1], self[2], self[0]).unwrap()) + }), + _ => { + let mut values = self; + values.sort(); + + let mid = len / 2; + + if len % 2 == 0 { + values[mid - 1].avg(values[mid]) + } else { + values[mid] + } + } + } + } +} + +#[inline] +fn maybe_pick_median(a: T, b: T, c: T) -> Option +where + T: PartialOrd, +{ + if (b >= a && b <= c) || (b >= c && b <= a) { + Some(b) + } else { + None + } +} + +#[cfg(test)] +mod tests { + use super::{Avg, Median}; + use crate::network::specific::U256; + use itertools::Itertools; + use std::fmt::Debug; + + const U256MAX: U256 = U256::max_value(); // 115792089237316195423570985008687907853269984665640564039457584007913129639935 + + #[test] + fn test_casper_avg() { + assert_eq!(U256MAX.avg(U256::from(0u8)), U256MAX / 2); + assert_eq!(U256MAX.avg(U256::from(1u8)), U256MAX / 2 + 1); + assert_eq!(U256MAX.avg(U256MAX - 1), U256MAX - 1); + assert_eq!(U256MAX.avg(U256MAX), U256MAX); + + assert_eq!((U256MAX - 1).avg(U256::from(0u8)), U256MAX / 2); + assert_eq!((U256MAX - 1).avg(U256::from(1u8)), U256MAX / 2); + assert_eq!((U256MAX - 1).avg(U256MAX - 1), U256MAX - 1); + assert_eq!((U256MAX - 1).avg(U256MAX), U256MAX - 1); + } + + #[test] + #[should_panic(expected = "Array is empty")] + fn test_median_empty_vector() { + let vec: Vec = vec![]; + + vec.median(); + } + + #[test] + fn test_median_single_element() { + assert_eq!(vec![1].median(), 1); + } + + #[test] + fn test_median_two_elements() { + test_all_permutations(vec![1, 3], 2); + test_all_permutations(vec![1, 2], 1); + test_all_permutations(vec![1, 1], 1); + } + + #[test] + fn test_median_three_elements() { + test_all_permutations(vec![1, 2, 3], 2); + test_all_permutations(vec![1, 1, 2], 1); + test_all_permutations(vec![1, 2, 2], 2); + test_all_permutations(vec![1, 1, 1], 1); + } + + #[test] + fn test_median_even_number_of_elements() { + test_all_permutations(vec![1, 2, 3, 4], 2); + test_all_permutations(vec![1, 2, 4, 4], 3); + test_all_permutations(vec![1, 1, 3, 3], 2); + test_all_permutations(vec![1, 1, 3, 4], 2); + test_all_permutations(vec![1, 1, 1, 3], 1); + test_all_permutations(vec![1, 3, 3, 3], 3); + test_all_permutations(vec![1, 1, 1, 1], 1); + 
test_all_permutations(vec![1, 2, 3, 4, 5, 6], 3); + } + + #[test] + fn test_median_odd_number_of_elements() { + test_all_permutations(vec![1, 2, 3, 4, 5], 3); + test_all_permutations(vec![1, 1, 3, 4, 5], 3); + test_all_permutations(vec![1, 1, 1, 4, 5], 1); + test_all_permutations(vec![1, 1, 1, 3, 3], 1); + test_all_permutations(vec![1, 1, 3, 3, 5], 3); + + test_all_permutations(vec![1, 2, 3, 5, 5], 3); + test_all_permutations(vec![1, 2, 5, 5, 5], 5); + test_all_permutations(vec![1, 1, 3, 3, 3], 3); + test_all_permutations(vec![1, 3, 3, 5, 5], 3); + + test_all_permutations(vec![1, 2, 2, 2, 2], 2); + test_all_permutations(vec![1, 1, 1, 1, 2], 1); + test_all_permutations(vec![1, 1, 1, 1, 1], 1); + + test_all_permutations(vec![1, 2, 3, 4, 5, 6, 7], 4); + } + + fn test_all_permutations(numbers: Vec, expected_value: T) { + let perms: Vec> = numbers.iter().permutations(numbers.len()).collect(); + + for perm in perms { + let p: Vec<_> = perm.iter().map(|&&v| v).collect(); + + assert_eq!(p.median(), expected_value); + } + } +} diff --git a/src/utils/mod.rs b/src/utils/mod.rs new file mode 100644 index 0000000..9d0b5da --- /dev/null +++ b/src/utils/mod.rs @@ -0,0 +1,4 @@ +pub(crate) mod filter; +pub(crate) mod median; +pub(crate) mod trim; +pub(crate) mod trim_zeros; diff --git a/src/utils/trim.rs b/src/utils/trim.rs new file mode 100644 index 0000000..7cd3ce0 --- /dev/null +++ b/src/utils/trim.rs @@ -0,0 +1,151 @@ +use crate::network::{ + assert::Unwrap, + error::Error, + specific::{FromBytesRepr, U256}, +}; + +pub trait Trim +where + Self: Sized, +{ + fn trim_end(&mut self, len: usize) -> T; +} + +impl Trim> for Vec { + fn trim_end(&mut self, len: usize) -> Self { + if len >= self.len() { + std::mem::take(self) + } else { + self.split_off(self.len() - len) + } + } +} + +impl Trim for Vec { + fn trim_end(&mut self, len: usize) -> U256 { + U256::from_bytes_repr(self.trim_end(len)) + } +} + +impl Trim for Vec { + fn trim_end(&mut self, len: usize) -> usize { + let y: U256 = self.trim_end(len); + y.try_into().unwrap_or_revert(|_| Error::NumberOverflow(y)) + } +} + +impl Trim for Vec { + fn trim_end(&mut self, len: usize) -> u64 { + let y: U256 = self.trim_end(len); + y.try_into().unwrap_or_revert(|_| Error::NumberOverflow(y)) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + network::specific::U256, + protocol::constants::{REDSTONE_MARKER, REDSTONE_MARKER_BS}, + utils::trim::Trim, + }; + + const MARKER_DECIMAL: u64 = 823907890102272; + + fn redstone_marker_bytes() -> Vec { + REDSTONE_MARKER.into() + } + + #[test] + fn test_trim_end_number() { + let (rest, result): (_, U256) = test_trim_end(3); + assert_eq!(result, (256u32.pow(2) * 30).into()); + assert_eq!(rest.as_slice(), &REDSTONE_MARKER[..6]); + + let (_, result): (_, u64) = test_trim_end(3); + assert_eq!(result, 256u64.pow(2) * 30); + + let (_, result): (_, usize) = test_trim_end(3); + assert_eq!(result, 256usize.pow(2) * 30); + + let (_, result): (_, Vec) = test_trim_end(3); + assert_eq!(result.as_slice(), &REDSTONE_MARKER[6..]); + } + + #[test] + fn test_trim_end_number_null() { + let (rest, result): (_, U256) = test_trim_end(0); + assert_eq!(result, 0u32.into()); + assert_eq!(rest.as_slice(), &REDSTONE_MARKER); + + let (_, result): (_, u64) = test_trim_end(0); + assert_eq!(result, 0); + + let (_, result): (_, usize) = test_trim_end(0); + assert_eq!(result, 0); + + let (_, result): (_, Vec) = test_trim_end(0); + assert_eq!(result, Vec::::new()); + } + + #[test] + fn test_trim_end_whole() { + test_trim_end_whole_size(REDSTONE_MARKER_BS); + 
test_trim_end_whole_size(REDSTONE_MARKER_BS - 1); + test_trim_end_whole_size(REDSTONE_MARKER_BS - 2); + test_trim_end_whole_size(REDSTONE_MARKER_BS + 1); + } + + fn test_trim_end_whole_size(size: usize) { + let (rest, result): (_, U256) = test_trim_end(size); + assert_eq!(result, MARKER_DECIMAL.into()); + assert_eq!( + rest.as_slice().len(), + REDSTONE_MARKER_BS - size.min(REDSTONE_MARKER_BS) + ); + + let (_, result): (_, u64) = test_trim_end(size); + assert_eq!(result, MARKER_DECIMAL); + + let (_, result): (_, usize) = test_trim_end(size); + assert_eq!(result, 823907890102272usize); + + let (_rest, result): (_, Vec) = test_trim_end(size); + assert_eq!(result.as_slice().len(), size.min(REDSTONE_MARKER_BS)); + } + + #[test] + fn test_trim_end_u64() { + let mut bytes = vec![255, 255, 255, 255, 255, 255, 255, 255, 255]; + let x: u64 = bytes.trim_end(8); + + let expected_bytes = vec![255]; + + assert_eq!(bytes, expected_bytes); + assert_eq!(x, 18446744073709551615); + } + + #[should_panic(expected = "Number overflow: 18591708106338011145")] + #[test] + fn test_trim_end_u64_overflow() { + let mut bytes = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9]; + + let _: u64 = bytes.trim_end(9); + } + + #[allow(dead_code)] + trait TestTrimEnd + where + Self: Sized, + { + fn test_trim_end(size: usize) -> (Self, T); + } + + fn test_trim_end(size: usize) -> (Vec, T) + where + Vec: Trim, + { + let mut bytes = redstone_marker_bytes(); + let rest = bytes.trim_end(size); + (bytes, rest) + } +} diff --git a/src/utils/trim_zeros.rs b/src/utils/trim_zeros.rs new file mode 100644 index 0000000..90b05a9 --- /dev/null +++ b/src/utils/trim_zeros.rs @@ -0,0 +1,45 @@ +pub trait TrimZeros { + fn trim_zeros(self) -> Self; +} + +impl TrimZeros for Vec { + fn trim_zeros(self) -> Self { + let mut res = self.len(); + + for i in (0..self.len()).rev() { + if self[i] != 0 { + break; + } + + res = i; + } + + let (rest, _) = self.split_at(res); + + rest.into() + } +} + +#[cfg(test)] +mod tests { + use crate::{protocol::constants::REDSTONE_MARKER, utils::trim_zeros::TrimZeros}; + + fn redstone_marker_bytes() -> Vec { + REDSTONE_MARKER.as_slice().into() + } + + #[test] + fn test_trim_zeros() { + let trimmed = redstone_marker_bytes().trim_zeros(); + assert_eq!(trimmed.as_slice(), &REDSTONE_MARKER[..7]); + + let trimmed = trimmed.trim_zeros(); + assert_eq!(trimmed.as_slice(), &REDSTONE_MARKER[..7]); + } + + #[test] + fn test_trim_zeros_empty() { + let trimmed = Vec::::default().trim_zeros(); + assert_eq!(trimmed, Vec::::default()); + } +}