From b1afc1d45bd9d069daa43b3bd7f15cf4bc282f0e Mon Sep 17 00:00:00 2001 From: Ayush Mishra Date: Fri, 10 May 2024 17:26:01 +0530 Subject: [PATCH] Extended the Cluster pallet with Cluster Configuration parameters (#332) ## Description Extended the Cluster pallet with Cluster Configuration parameters ## Types of Changes Please select the branch type you are merging and fill in the relevant template. - [ ] Hotfix - [ ] Release - [X] Fix or Feature ## Fix or Feature ### Types of Changes - [ ] Tech Debt (Code improvements) - [ ] Bug fix (non-breaking change which fixes an issue) - [X] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to change) - [ ] Dependency upgrade (A change in substrate or any 3rd party crate version) ### Migrations and Hooks - [X] This change requires a runtime migration. - [ ] Modifies `on_initialize` - [ ] Modifies `on_finalize` ### Checklist for Fix or Feature - [X] Change has been tested locally. - [X] Change adds / updates tests if applicable. - [X] Changelog doc updated. ## Checklist for Hotfix - [ ] Change has been deployed to Testnet. - [ ] Change has been tested in Testnet. - [ ] Changelog has been updated. - [ ] Crate version has been updated. - [ ] Spec version has been updated. - [ ] Transaction version has been updated if required. - [ ] Pull Request to `dev` has been created. - [ ] Pull Request to `staging` has been created. ## Checklist for Release - [ ] Change has been deployed to Devnet. - [ ] Change has been tested in Devnet. - [ ] Change has been deployed to Qanet. - [ ] Change has been tested in Qanet. - [ ] Change has been deployed to Testnet. - [ ] Change has been tested in Testnet. - [X] Changelog has been updated. - [ ] Crate version has been updated. - [X] Spec version has been updated. - [X] Transaction version has been updated if required. 
--- CHANGELOG.md | 1 + Cargo.lock | 1 + pallets/ddc-clusters/Cargo.toml | 1 + pallets/ddc-clusters/src/benchmarking.rs | 24 ++- pallets/ddc-clusters/src/cluster.rs | 9 + pallets/ddc-clusters/src/lib.rs | 45 ++++- pallets/ddc-clusters/src/migration.rs | 198 ++++++++++++++++++++++ pallets/ddc-clusters/src/mock.rs | 10 +- pallets/ddc-clusters/src/testing_utils.rs | 23 ++- pallets/ddc-clusters/src/tests.rs | 129 ++++++++++++-- pallets/ddc-customers/src/benchmarking.rs | 7 +- pallets/ddc-payouts/src/benchmarking.rs | 7 +- pallets/ddc-staking/src/testing_utils.rs | 7 +- primitives/src/lib.rs | 3 + runtime/cere-dev/src/lib.rs | 8 +- runtime/cere/src/lib.rs | 8 +- 16 files changed, 450 insertions(+), 31 deletions(-) create mode 100644 pallets/ddc-clusters/src/migration.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 76aa4fbab..82b09606c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - [C,D] Updated Substrate to polkadot-v1.1.0 - [C,D] Introduction of the OpenGov +- [C,D] `pallet-ddc-clusters`: Added Erasure coding and Replication in cluster params ## [5.2.0] diff --git a/Cargo.lock b/Cargo.lock index ac4f35488..e51d12b61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5180,6 +5180,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "log", "pallet-balances", "pallet-contracts", "pallet-contracts-primitives", diff --git a/pallets/ddc-clusters/Cargo.toml b/pallets/ddc-clusters/Cargo.toml index c209fb9f8..7dda7df3f 100644 --- a/pallets/ddc-clusters/Cargo.toml +++ b/pallets/ddc-clusters/Cargo.toml @@ -12,6 +12,7 @@ repository.workspace = true # 3rd-party depdencies codec = { workspace = true } hex-literal = { workspace = true } +log = { workspace = true } scale-info = { workspace = true } serde = { workspace = true } diff --git a/pallets/ddc-clusters/src/benchmarking.rs b/pallets/ddc-clusters/src/benchmarking.rs index 5aa62f4c9..bcf46b49c 100644 --- 
a/pallets/ddc-clusters/src/benchmarking.rs +++ b/pallets/ddc-clusters/src/benchmarking.rs @@ -24,7 +24,12 @@ benchmarks! { create_cluster { let cluster_id = ClusterId::from([1; 20]); let user = account::("user", USER_SEED, 0u32); - let cluster_params = ClusterParams { node_provider_auth_contract: Some(user.clone()) }; + let cluster_params = ClusterParams { + node_provider_auth_contract: Some(user.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }; let cluster_gov_params: ClusterGovParams, BlockNumberFor> = ClusterGovParams { treasury_share: Perquintill::default(), validators_share: Perquintill::default(), @@ -78,10 +83,23 @@ benchmarks! { let user = account::("user", USER_SEED, 0u32); let user_2 = account::("user", USER_SEED_2, 0u32); let _ = config_cluster::(user.clone(), cluster_id); - let new_cluster_params = ClusterParams { node_provider_auth_contract: Some(user_2.clone()) }; + let new_cluster_params = ClusterParams { + node_provider_auth_contract: Some(user_2.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }; }: _(RawOrigin::Signed(user.clone()), cluster_id, new_cluster_params) verify { - assert_eq!(Clusters::::try_get(cluster_id).unwrap().props, ClusterProps { node_provider_auth_contract: Some(user_2) }); + assert_eq!( + Clusters::::try_get(cluster_id).unwrap().props, + ClusterProps { + node_provider_auth_contract: Some(user_2), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + } + ); } set_cluster_gov_params { diff --git a/pallets/ddc-clusters/src/cluster.rs b/pallets/ddc-clusters/src/cluster.rs index 93e19603e..0d9abae99 100644 --- a/pallets/ddc-clusters/src/cluster.rs +++ b/pallets/ddc-clusters/src/cluster.rs @@ -21,6 +21,9 @@ pub struct Cluster { #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Serialize, Deserialize)] pub struct ClusterProps { pub node_provider_auth_contract: Option, + pub erasure_coding_required: u32, 
+ pub erasure_coding_total: u32, + pub replication_total: u32, } impl Cluster { @@ -36,6 +39,9 @@ impl Cluster { reserve_id, props: ClusterProps { node_provider_auth_contract: cluster_params.node_provider_auth_contract, + erasure_coding_required: cluster_params.erasure_coding_required, + erasure_coding_total: cluster_params.erasure_coding_total, + replication_total: cluster_params.replication_total, }, }) } @@ -46,6 +52,9 @@ impl Cluster { ) -> Result<(), ClusterError> { self.props = ClusterProps { node_provider_auth_contract: cluster_params.node_provider_auth_contract, + erasure_coding_required: cluster_params.erasure_coding_required, + erasure_coding_total: cluster_params.erasure_coding_total, + replication_total: cluster_params.replication_total, }; Ok(()) } diff --git a/pallets/ddc-clusters/src/lib.rs b/pallets/ddc-clusters/src/lib.rs index 937e5b7c6..337b04ed4 100644 --- a/pallets/ddc-clusters/src/lib.rs +++ b/pallets/ddc-clusters/src/lib.rs @@ -26,6 +26,8 @@ pub(crate) mod mock; #[cfg(test)] mod tests; +pub mod migration; + use ddc_primitives::{ traits::{ cluster::{ClusterCreator, ClusterVisitor, ClusterVisitorError}, @@ -66,7 +68,7 @@ pub mod pallet { /// The current storage version. 
const STORAGE_VERSION: frame_support::traits::StorageVersion = - frame_support::traits::StorageVersion::new(0); + frame_support::traits::StorageVersion::new(1); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -81,6 +83,12 @@ pub mod pallet { type StakerCreator: StakerCreator>; type Currency: LockableCurrency>; type WeightInfo: WeightInfo; + #[pallet::constant] + type MinErasureCodingRequiredLimit: Get; + #[pallet::constant] + type MinErasureCodingTotalLimit: Get; + #[pallet::constant] + type MinReplicationTotalLimit: Get; } #[pallet::event] @@ -111,6 +119,9 @@ pub mod pallet { NodeAuthContractCallFailed, NodeAuthContractDeployFailed, NodeAuthNodeAuthorizationNotSuccessful, + ErasureCodingRequiredDidNotMeetMinimum, + ErasureCodingTotalNotMeetMinimum, + ReplicationTotalDidNotMeetMinimum, } #[pallet::storage] @@ -171,6 +182,9 @@ pub mod pallet { .props .node_provider_auth_contract .clone(), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 }, self.clusters_gov_params .iter() @@ -297,6 +311,18 @@ pub mod pallet { let mut cluster = Clusters::::try_get(cluster_id).map_err(|_| Error::::ClusterDoesNotExist)?; ensure!(cluster.manager_id == caller_id, Error::::OnlyClusterManager); + ensure!( + cluster_params.erasure_coding_required >= T::MinErasureCodingRequiredLimit::get(), + Error::::ErasureCodingRequiredDidNotMeetMinimum + ); + ensure!( + cluster_params.erasure_coding_total >= T::MinErasureCodingTotalLimit::get(), + Error::::ErasureCodingTotalNotMeetMinimum + ); + ensure!( + cluster_params.replication_total >= T::MinReplicationTotalLimit::get(), + Error::::ReplicationTotalDidNotMeetMinimum + ); cluster.set_params(cluster_params).map_err(Into::>::into)?; Clusters::::insert(cluster_id, cluster); Self::deposit_event(Event::::ClusterParamsSet { cluster_id }); @@ -330,11 +356,24 @@ pub mod pallet { cluster_params: ClusterParams, cluster_gov_params: ClusterGovParams, BlockNumberFor>, ) -> DispatchResult { + 
ensure!(!Clusters::::contains_key(cluster_id), Error::::ClusterAlreadyExists); + + ensure!( + cluster_params.erasure_coding_required >= T::MinErasureCodingRequiredLimit::get(), + Error::::ErasureCodingRequiredDidNotMeetMinimum + ); + ensure!( + cluster_params.erasure_coding_total >= T::MinErasureCodingTotalLimit::get(), + Error::::ErasureCodingTotalNotMeetMinimum + ); + ensure!( + cluster_params.replication_total >= T::MinReplicationTotalLimit::get(), + Error::::ReplicationTotalDidNotMeetMinimum + ); + let cluster = Cluster::new(cluster_id, cluster_manager_id, cluster_reserve_id, cluster_params) .map_err(Into::>::into)?; - ensure!(!Clusters::::contains_key(cluster_id), Error::::ClusterAlreadyExists); - Clusters::::insert(cluster_id, cluster); ClustersGovParams::::insert(cluster_id, cluster_gov_params); Self::deposit_event(Event::::ClusterCreated { cluster_id }); diff --git a/pallets/ddc-clusters/src/migration.rs b/pallets/ddc-clusters/src/migration.rs new file mode 100644 index 000000000..cdb17d46e --- /dev/null +++ b/pallets/ddc-clusters/src/migration.rs @@ -0,0 +1,198 @@ +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +use frame_support::{ + storage_alias, + traits::{Get, GetStorageVersion, OnRuntimeUpgrade, StorageVersion}, + weights::Weight, +}; +use log::info; +#[cfg(feature = "try-runtime")] +use sp_runtime::DispatchError; +use sp_runtime::Saturating; + +use super::*; +use crate::cluster::ClusterProps; + +const LOG_TARGET: &str = "ddc-clusters"; + +pub mod v0 { + use frame_support::pallet_prelude::*; + + use super::*; + + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct Cluster { + pub cluster_id: ClusterId, + pub manager_id: AccountId, + pub reserve_id: AccountId, + pub props: ClusterProps, + } + + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct ClusterProps { + pub node_provider_auth_contract: Option, + } + + #[storage_alias] + pub(super) type Clusters = StorageMap< + 
crate::Pallet, + Blake2_128Concat, + ClusterId, + Cluster<::AccountId>, + >; +} + +pub fn migrate_to_v1() -> Weight { + let on_chain_version = Pallet::::on_chain_storage_version(); + let current_version = Pallet::::current_storage_version(); + + info!( + target: LOG_TARGET, + "Running migration with current storage version {:?} / onchain {:?}", + current_version, + on_chain_version + ); + + if on_chain_version == 0 && current_version == 1 { + let mut translated = 0u64; + let count = v0::Clusters::::iter().count(); + info!( + target: LOG_TARGET, + " >>> Updating DDC Cluster storage. Migrating {} clusters...", count + ); + + Clusters::::translate::, _>( + |cluster_id: ClusterId, cluster: v0::Cluster| { + info!(target: LOG_TARGET, " Migrating cluster for cluster ID {:?}...", cluster_id); + translated.saturating_inc(); + let props = ClusterProps { + node_provider_auth_contract: cluster.props.node_provider_auth_contract, + erasure_coding_required: 16, + erasure_coding_total: 48, + replication_total: 20, + }; + + Some(Cluster { + cluster_id: cluster.cluster_id, + manager_id: cluster.manager_id, + reserve_id: cluster.reserve_id, + props, + }) + }, + ); + + // Update storage version. 
+ StorageVersion::new(1).put::>(); + info!( + target: LOG_TARGET, + "Upgraded {} records, storage to version {:?}", + translated, + current_version + ); + + T::DbWeight::get().reads_writes(translated + 1, translated + 1) + } else { + info!(target: LOG_TARGET, " >>> Unused migration!"); + T::DbWeight::get().reads(1) + } +} +pub struct MigrateToV1(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for MigrateToV1 { + fn on_runtime_upgrade() -> Weight { + migrate_to_v1::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + let prev_count = v0::Clusters::::iter().count(); + + Ok((prev_count as u64).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(prev_state: Vec) -> Result<(), DispatchError> { + let prev_count: u64 = + Decode::decode(&mut &prev_state[..]).expect("pre_upgrade provides a valid state; qed"); + + let post_count = Clusters::::iter().count() as u64; + ensure!( + prev_count == post_count, + "the cluster count before and after the migration should be the same" + ); + + let current_version = Pallet::::current_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); + + frame_support::ensure!(current_version == 1, "must_upgrade"); + ensure!( + current_version == on_chain_version, + "after migration, the current_version and on_chain_version should be the same" + ); + Ok(()) + } +} + +#[cfg(test)] +#[cfg(feature = "try-runtime")] +mod test { + + use frame_support::pallet_prelude::StorageVersion; + + use super::*; + use crate::mock::{Test as T, *}; + + #[test] + fn cluster_migration_works() { + ExtBuilder.build_and_execute(|| { + let cluster_id0 = ClusterId::from([0; 20]); + let cluster_id1 = ClusterId::from([1; 20]); + let cluster_id2 = ClusterId::from([2; 20]); + let cluster_manager_id = AccountId::from([1; 32]); + let cluster_reserve_id = AccountId::from([2; 32]); + let auth_contract = AccountId::from([3; 32]); + + assert_eq!(StorageVersion::get::>(), 0); + + let cluster1 = 
v0::Cluster { + cluster_id: cluster_id1, + manager_id: cluster_manager_id.clone(), + reserve_id: cluster_reserve_id.clone(), + props: v0::ClusterProps { + node_provider_auth_contract: Some(auth_contract.clone()), + }, + }; + + v0::Clusters::::insert(cluster_id1, cluster1); + let cluster2 = v0::Cluster { + cluster_id: cluster_id2, + manager_id: cluster_manager_id, + reserve_id: cluster_reserve_id, + props: v0::ClusterProps { + node_provider_auth_contract: Some(auth_contract.clone()), + }, + }; + + v0::Clusters::::insert(cluster_id2, cluster2); + let cluster_count = v0::Clusters::::iter_values().count() as u32; + + assert_eq!(cluster_count, 3); + let state = MigrateToV1::::pre_upgrade().unwrap(); + let _weight = MigrateToV1::::on_runtime_upgrade(); + MigrateToV1::::post_upgrade(state).unwrap(); + + let cluster_count_after_upgrade = Clusters::::iter_values().count() as u32; + + assert_eq!(StorageVersion::get::>(), 1); + assert_eq!(cluster_count_after_upgrade, 3); + assert_eq!(Clusters::::get(cluster_id0).unwrap().props.erasure_coding_required, 16); + assert_eq!(Clusters::::get(cluster_id0).unwrap().props.erasure_coding_total, 48); + assert_eq!(Clusters::::get(cluster_id0).unwrap().props.replication_total, 20); + assert_eq!(Clusters::::get(cluster_id1).unwrap().props.erasure_coding_required, 16); + assert_eq!(Clusters::::get(cluster_id1).unwrap().props.erasure_coding_total, 48); + assert_eq!(Clusters::::get(cluster_id1).unwrap().props.replication_total, 20); + assert_eq!(Clusters::::get(cluster_id2).unwrap().props.erasure_coding_required, 16); + assert_eq!(Clusters::::get(cluster_id2).unwrap().props.erasure_coding_total, 48); + assert_eq!(Clusters::::get(cluster_id2).unwrap().props.replication_total, 20); + }); + } +} diff --git a/pallets/ddc-clusters/src/mock.rs b/pallets/ddc-clusters/src/mock.rs index f7c7fc5a5..b333afb9a 100644 --- a/pallets/ddc-clusters/src/mock.rs +++ b/pallets/ddc-clusters/src/mock.rs @@ -203,6 +203,9 @@ impl crate::pallet::Config for Test { type 
StakingVisitor = TestStakingVisitor; type StakerCreator = TestStaker; type WeightInfo = (); + type MinErasureCodingRequiredLimit = ConstU32<4>; + type MinErasureCodingTotalLimit = ConstU32<6>; + type MinReplicationTotalLimit = ConstU32<3>; } pub(crate) type DdcStakingCall = crate::Call; @@ -277,7 +280,12 @@ impl ExtBuilder { ClusterId::from([0; 20]), AccountId::from([0; 32]), AccountId::from([0; 32]), - ClusterParams { node_provider_auth_contract: Some(AccountId::from([0; 32])) }, + ClusterParams { + node_provider_auth_contract: Some(AccountId::from([0; 32])), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3, + }, ) { let _ = pallet_ddc_clusters::GenesisConfig:: { clusters: vec![cluster], diff --git a/pallets/ddc-clusters/src/testing_utils.rs b/pallets/ddc-clusters/src/testing_utils.rs index 9a8b74002..a09688a1f 100644 --- a/pallets/ddc-clusters/src/testing_utils.rs +++ b/pallets/ddc-clusters/src/testing_utils.rs @@ -20,7 +20,12 @@ pub fn config_cluster(user: T::AccountId, cluster_id: ClusterId) where T::AccountId: UncheckedFrom + AsRef<[u8]>, { - let cluster_params = ClusterParams { node_provider_auth_contract: Some(user.clone()) }; + let cluster_params = ClusterParams { + node_provider_auth_contract: Some(user.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3, + }; let cluster_gov_params: ClusterGovParams, BlockNumberFor> = ClusterGovParams { treasury_share: Perquintill::default(), validators_share: Perquintill::default(), @@ -52,7 +57,12 @@ pub fn config_cluster_and_node( where T::AccountId: UncheckedFrom + AsRef<[u8]>, { - let cluster_params = ClusterParams { node_provider_auth_contract: Some(user.clone()) }; + let cluster_params = ClusterParams { + node_provider_auth_contract: Some(user.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3, + }; let storage_node_params = StorageNodeParams { mode: StorageNodeMode::Storage, host: vec![1u8; 255], @@ -106,9 
+116,12 @@ where auth_contract = auth_contract.deploy_contract(user.clone())?; auth_contract.authorize_node(node_pub_key)?; - let updated_cluster_params = - ClusterParams { node_provider_auth_contract: Some(auth_contract.contract_id) }; - + let updated_cluster_params = ClusterParams { + node_provider_auth_contract: Some(auth_contract.contract_id), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3, + }; // Register auth contract let _ = DdcClusters::::set_cluster_params( RawOrigin::Signed(user).into(), diff --git a/pallets/ddc-clusters/src/tests.rs b/pallets/ddc-clusters/src/tests.rs index b55d98410..1f8b50956 100644 --- a/pallets/ddc-clusters/src/tests.rs +++ b/pallets/ddc-clusters/src/tests.rs @@ -43,7 +43,12 @@ fn create_cluster_works() { cluster_id, cluster_manager_id.clone(), cluster_reserve_id.clone(), - ClusterParams { node_provider_auth_contract: Some(auth_contract.clone()) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, cluster_gov_params.clone() ), BadOrigin @@ -55,7 +60,12 @@ fn create_cluster_works() { cluster_id, cluster_manager_id.clone(), cluster_reserve_id.clone(), - ClusterParams { node_provider_auth_contract: Some(auth_contract.clone()) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, cluster_gov_params.clone() )); @@ -111,7 +121,12 @@ fn create_cluster_works() { cluster_id, cluster_manager_id, cluster_reserve_id, - ClusterParams { node_provider_auth_contract: Some(auth_contract) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, cluster_gov_params ), Error::::ClusterAlreadyExists @@ -153,7 +168,12 @@ fn add_and_delete_node_works() { cluster_id, cluster_manager_id.clone(), 
cluster_reserve_id.clone(), - ClusterParams { node_provider_auth_contract: Some(cluster_manager_id.clone()) }, + ClusterParams { + node_provider_auth_contract: Some(cluster_manager_id.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, ClusterGovParams { treasury_share: Perquintill::from_float(0.05), validators_share: Perquintill::from_float(0.01), @@ -219,7 +239,12 @@ fn add_and_delete_node_works() { assert_ok!(DdcClusters::set_cluster_params( RuntimeOrigin::signed(cluster_manager_id.clone()), cluster_id, - ClusterParams { node_provider_auth_contract: Some(contract_id) }, + ClusterParams { + node_provider_auth_contract: Some(contract_id), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, )); // Node added succesfully @@ -362,7 +387,12 @@ fn set_cluster_params_works() { DdcClusters::set_cluster_params( RuntimeOrigin::signed(cluster_manager_id.clone()), cluster_id, - ClusterParams { node_provider_auth_contract: Some(auth_contract_1.clone()) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract_1.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, ), Error::::ClusterDoesNotExist ); @@ -373,7 +403,12 @@ fn set_cluster_params_works() { cluster_id, cluster_manager_id.clone(), cluster_reserve_id.clone(), - ClusterParams { node_provider_auth_contract: Some(auth_contract_1) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract_1), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, ClusterGovParams { treasury_share: Perquintill::from_float(0.05), validators_share: Perquintill::from_float(0.01), @@ -392,19 +427,74 @@ fn set_cluster_params_works() { DdcClusters::set_cluster_params( RuntimeOrigin::signed(cluster_reserve_id), cluster_id, - ClusterParams { node_provider_auth_contract: Some(auth_contract_2.clone()) }, + ClusterParams { + node_provider_auth_contract: 
Some(auth_contract_2.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, ), Error::::OnlyClusterManager ); + assert_noop!( + DdcClusters::set_cluster_params( + RuntimeOrigin::signed(cluster_manager_id.clone()), + cluster_id, + ClusterParams { + node_provider_auth_contract: Some(auth_contract_2.clone()), + erasure_coding_required: 1, + erasure_coding_total: 6, + replication_total: 3 + }, + ), + Error::::ErasureCodingRequiredDidNotMeetMinimum + ); + + assert_noop!( + DdcClusters::set_cluster_params( + RuntimeOrigin::signed(cluster_manager_id.clone()), + cluster_id, + ClusterParams { + node_provider_auth_contract: Some(auth_contract_2.clone()), + erasure_coding_required: 4, + erasure_coding_total: 1, + replication_total: 3 + }, + ), + Error::::ErasureCodingTotalNotMeetMinimum + ); + + assert_noop!( + DdcClusters::set_cluster_params( + RuntimeOrigin::signed(cluster_manager_id.clone()), + cluster_id, + ClusterParams { + node_provider_auth_contract: Some(auth_contract_2.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 1 + }, + ), + Error::::ReplicationTotalDidNotMeetMinimum + ); + assert_ok!(DdcClusters::set_cluster_params( RuntimeOrigin::signed(cluster_manager_id), cluster_id, - ClusterParams { node_provider_auth_contract: Some(auth_contract_2.clone()) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract_2.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, )); let updated_cluster = DdcClusters::clusters(cluster_id).unwrap(); assert_eq!(updated_cluster.props.node_provider_auth_contract, Some(auth_contract_2)); + assert_eq!(updated_cluster.props.erasure_coding_required, 4); + assert_eq!(updated_cluster.props.erasure_coding_total, 6); + assert_eq!(updated_cluster.props.replication_total, 3); // Checking that event was emitted assert_eq!(System::events().len(), 2); @@ -450,7 +540,12 @@ fn set_cluster_gov_params_works() { cluster_id, 
cluster_manager_id.clone(), cluster_reserve_id, - ClusterParams { node_provider_auth_contract: Some(auth_contract) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, cluster_gov_params.clone() )); @@ -556,7 +651,12 @@ fn cluster_visitor_works() { cluster_id, cluster_manager_id, cluster_reserve_id.clone(), - ClusterParams { node_provider_auth_contract: Some(auth_contract) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, cluster_gov_params )); @@ -663,7 +763,12 @@ fn cluster_creator_works() { cluster_id, cluster_manager_id, cluster_reserve_id, - ClusterParams { node_provider_auth_contract: Some(auth_contract) }, + ClusterParams { + node_provider_auth_contract: Some(auth_contract), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, cluster_gov_params )); diff --git a/pallets/ddc-customers/src/benchmarking.rs b/pallets/ddc-customers/src/benchmarking.rs index 263239b6d..60ec90c82 100644 --- a/pallets/ddc-customers/src/benchmarking.rs +++ b/pallets/ddc-customers/src/benchmarking.rs @@ -38,7 +38,12 @@ benchmarks! 
{ ClusterId::from([1; 20]), user.clone(), user.clone(), - ClusterParams { node_provider_auth_contract: Some(user.clone()) }, + ClusterParams { + node_provider_auth_contract: Some(user.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3 + }, cluster_gov_params ); diff --git a/pallets/ddc-payouts/src/benchmarking.rs b/pallets/ddc-payouts/src/benchmarking.rs index dde17e11d..d92f86693 100644 --- a/pallets/ddc-payouts/src/benchmarking.rs +++ b/pallets/ddc-payouts/src/benchmarking.rs @@ -60,7 +60,12 @@ fn create_cluster( fn create_default_cluster(cluster_id: ClusterId) { let cluster_manager = create_account::("cm", 0, 0); let cluster_reserve = create_account::("cr", 0, 0); - let cluster_params = ClusterParams { node_provider_auth_contract: Default::default() }; + let cluster_params = ClusterParams { + node_provider_auth_contract: Default::default(), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3, + }; let cluster_gov_params: ClusterGovParams, BlockNumberFor> = ClusterGovParams { treasury_share: Perquintill::from_percent(5), validators_share: Perquintill::from_percent(10), diff --git a/pallets/ddc-staking/src/testing_utils.rs b/pallets/ddc-staking/src/testing_utils.rs index 16788cb27..c302b2350 100644 --- a/pallets/ddc-staking/src/testing_utils.rs +++ b/pallets/ddc-staking/src/testing_utils.rs @@ -111,7 +111,12 @@ pub fn create_stash_controller_node_with_balance( } let cluster_id = ClusterId::from([1; 20]); - let cluster_params = ClusterParams { node_provider_auth_contract: Some(stash.clone()) }; + let cluster_params = ClusterParams { + node_provider_auth_contract: Some(stash.clone()), + erasure_coding_required: 4, + erasure_coding_total: 6, + replication_total: 3, + }; let cluster_gov_params: ClusterGovParams, BlockNumberFor> = ClusterGovParams { treasury_share: Perquintill::default(), validators_share: Perquintill::default(), diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs index 
45e77e553..509db89d7 100644 --- a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -20,6 +20,9 @@ pub type StorageNodePubKey = AccountId32; #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq)] pub struct ClusterParams { pub node_provider_auth_contract: Option, + pub erasure_coding_required: u32, + pub erasure_coding_total: u32, + pub replication_total: u32, } // ClusterGovParams includes Governance sensitive parameters diff --git a/runtime/cere-dev/src/lib.rs b/runtime/cere-dev/src/lib.rs index ac21adf01..5ea327e61 100644 --- a/runtime/cere-dev/src/lib.rs +++ b/runtime/cere-dev/src/lib.rs @@ -140,10 +140,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 53001, + spec_version: 53002, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 16, + transaction_version: 17, state_version: 0, }; @@ -1150,6 +1150,9 @@ impl pallet_ddc_clusters::Config for Runtime { type StakerCreator = pallet_ddc_staking::Pallet; type Currency = Balances; type WeightInfo = pallet_ddc_clusters::weights::SubstrateWeight; + type MinErasureCodingRequiredLimit = ConstU32<4>; + type MinErasureCodingTotalLimit = ConstU32<6>; + type MinReplicationTotalLimit = ConstU32<3>; } parameter_types! { @@ -1321,6 +1324,7 @@ pub mod migrations { /// Unreleased migrations. Add new ones here: pub type Unreleased = ( + pallet_ddc_clusters::migration::MigrateToV1, pallet_contracts::migration::Migration, pallet_referenda::migration::v1::MigrateV0ToV1, // Gov v1 storage migrations diff --git a/runtime/cere/src/lib.rs b/runtime/cere/src/lib.rs index 5535f6dd7..07fc82db9 100644 --- a/runtime/cere/src/lib.rs +++ b/runtime/cere/src/lib.rs @@ -134,10 +134,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. 
If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 53001, + spec_version: 53002, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 16, + transaction_version: 17, state_version: 0, }; @@ -1135,6 +1135,9 @@ impl pallet_ddc_clusters::Config for Runtime { type StakerCreator = pallet_ddc_staking::Pallet; type Currency = Balances; type WeightInfo = pallet_ddc_clusters::weights::SubstrateWeight; + type MinErasureCodingRequiredLimit = ConstU32<4>; + type MinErasureCodingTotalLimit = ConstU32<6>; + type MinReplicationTotalLimit = ConstU32<3>; } impl pallet_ddc_nodes::Config for Runtime { @@ -1323,6 +1326,7 @@ pub mod migrations { /// Unreleased migrations. Add new ones here: pub type Unreleased = ( + pallet_ddc_clusters::migration::MigrateToV1, pallet_contracts::migration::Migration, pallet_referenda::migration::v1::MigrateV0ToV1, // Gov v1 storage migrations