diff --git a/Cargo.lock b/Cargo.lock
index 7fc2951265..4f3f476a82 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10466,6 +10466,7 @@ dependencies = [
  "starknet_monitoring_endpoint",
  "starknet_sequencer_infra",
  "starknet_sequencer_node",
+ "starknet_state_sync",
  "starknet_task_executor",
  "strum 0.25.0",
  "tempfile",
@@ -10652,6 +10653,7 @@ dependencies = [
  "starknet_sequencer_infra",
  "starknet_sequencer_node",
  "starknet_sierra_compile",
+ "starknet_state_sync",
  "starknet_state_sync_types",
  "thiserror",
  "tokio",
diff --git a/config/sequencer/default_config.json b/config/sequencer/default_config.json
index 5199edd4b2..16c6f231e5 100644
--- a/config/sequencer/default_config.json
+++ b/config/sequencer/default_config.json
@@ -944,6 +944,176 @@
     "privacy": "Public",
     "value": ""
   },
+  "state_sync_config.network_config.advertised_multiaddr": {
+    "description": "The external address other peers see this node. If this is set, the node will not try to find out which addresses it has and will write this address as external instead",
+    "privacy": "Public",
+    "value": ""
+  },
+  "state_sync_config.network_config.advertised_multiaddr.#is_none": {
+    "description": "Flag for an optional field.",
+    "privacy": "TemporaryValue",
+    "value": true
+  },
+  "state_sync_config.network_config.bootstrap_peer_multiaddr": {
+    "description": "The multiaddress of the peer node. It should include the peer's id. For more info: https://docs.libp2p.io/concepts/fundamentals/peers/",
+    "privacy": "Public",
+    "value": ""
+  },
+  "state_sync_config.network_config.bootstrap_peer_multiaddr.#is_none": {
+    "description": "Flag for an optional field.",
+    "privacy": "TemporaryValue",
+    "value": true
+  },
+  "state_sync_config.network_config.chain_id": {
+    "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.",
+    "pointer_target": "chain_id",
+    "privacy": "Public"
+  },
+  "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.base_delay_millis": {
+    "description": "The base delay in milliseconds for the exponential backoff strategy.",
+    "privacy": "Public",
+    "value": 2
+  },
+  "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.factor": {
+    "description": "The factor for the exponential backoff strategy.",
+    "privacy": "Public",
+    "value": 5
+  },
+  "state_sync_config.network_config.discovery_config.bootstrap_dial_retry_config.max_delay_seconds": {
+    "description": "The maximum delay in seconds for the exponential backoff strategy.",
+    "privacy": "Public",
+    "value": 5
+  },
+  "state_sync_config.network_config.discovery_config.heartbeat_interval": {
+    "description": "The interval between each discovery (Kademlia) query in milliseconds.",
+    "privacy": "Public",
+    "value": 100
+  },
+  "state_sync_config.network_config.idle_connection_timeout": {
+    "description": "Amount of time in seconds that a connection with no active sessions will stay alive.",
+    "privacy": "Public",
+    "value": 120
+  },
+  "state_sync_config.network_config.peer_manager_config.malicious_timeout_seconds": {
+    "description": "The duration in seconds a peer is blacklisted after being marked as malicious.",
+    "privacy": "Public",
+    "value": 31536000
+  },
+  "state_sync_config.network_config.peer_manager_config.unstable_timeout_millis": {
+    "description": "The duration in milliseconds a peer blacklisted after being reported as unstable.",
+    "privacy": "Public",
+    "value": 1000
+  },
+  "state_sync_config.network_config.quic_port": {
"description": "The port that the node listens on for incoming quic connections.", + "privacy": "Public", + "value": 10001 + }, + "state_sync_config.network_config.secret_key": { + "description": "The secret key used for building the peer id. If it's an empty string a random one will be used.", + "privacy": "Private", + "value": "" + }, + "state_sync_config.network_config.session_timeout": { + "description": "Maximal time in seconds that each session can take before failing on timeout.", + "privacy": "Public", + "value": 120 + }, + "state_sync_config.network_config.tcp_port": { + "description": "The port that the node listens on for incoming tcp connections.", + "privacy": "Public", + "value": 10000 + }, + "state_sync_config.p2p_sync_client_config.buffer_size": { + "description": "Size of the buffer for read from the storage and for incoming responses.", + "privacy": "Public", + "value": 100000 + }, + "state_sync_config.p2p_sync_client_config.num_block_classes_per_query": { + "description": "The maximum amount of block's classes to ask from peers in each iteration.", + "privacy": "Public", + "value": 100 + }, + "state_sync_config.p2p_sync_client_config.num_block_state_diffs_per_query": { + "description": "The maximum amount of block's state diffs to ask from peers in each iteration.", + "privacy": "Public", + "value": 100 + }, + "state_sync_config.p2p_sync_client_config.num_block_transactions_per_query": { + "description": "The maximum amount of blocks to ask their transactions from peers in each iteration.", + "privacy": "Public", + "value": 100 + }, + "state_sync_config.p2p_sync_client_config.num_headers_per_query": { + "description": "The maximum amount of headers to ask from peers in each iteration.", + "privacy": "Public", + "value": 10000 + }, + "state_sync_config.p2p_sync_client_config.stop_sync_at_block_number": { + "description": "Stops the sync at given block number and closes the node cleanly. Used to run profiling on the node.", + "privacy": "Public", + "value": 1000 + }, + "state_sync_config.p2p_sync_client_config.stop_sync_at_block_number.#is_none": { + "description": "Flag for an optional field.", + "privacy": "TemporaryValue", + "value": true + }, + "state_sync_config.p2p_sync_client_config.wait_period_for_new_data": { + "description": "Time in seconds to wait when a query returned with partial data before sending a new query", + "privacy": "Public", + "value": 5 + }, + "state_sync_config.storage_config.db_config.chain_id": { + "description": "The chain to follow. For more details see https://docs.starknet.io/documentation/architecture_and_concepts/Blocks/transactions/#chain-id.", + "pointer_target": "chain_id", + "privacy": "Public" + }, + "state_sync_config.storage_config.db_config.enforce_file_exists": { + "description": "Whether to enforce that the path exists. 
+    "description": "Whether to enforce that the path exists. If true, `open_env` fails when the mdbx.dat file does not exist.",
+    "privacy": "Public",
+    "value": false
+  },
+  "state_sync_config.storage_config.db_config.growth_step": {
+    "description": "The growth step in bytes, must be greater than zero to allow the database to grow.",
+    "privacy": "Public",
+    "value": 4294967296
+  },
+  "state_sync_config.storage_config.db_config.max_size": {
+    "description": "The maximum size of the node's storage in bytes.",
+    "privacy": "Public",
+    "value": 1099511627776
+  },
+  "state_sync_config.storage_config.db_config.min_size": {
+    "description": "The minimum size of the node's storage in bytes.",
+    "privacy": "Public",
+    "value": 1048576
+  },
+  "state_sync_config.storage_config.db_config.path_prefix": {
+    "description": "Prefix of the path of the node's storage directory, the storage file path will be <path_prefix>/<chain_id>. The path is not created automatically.",
+    "privacy": "Public",
+    "value": "./data"
+  },
+  "state_sync_config.storage_config.mmap_file_config.growth_step": {
+    "description": "The growth step in bytes, must be greater than max_object_size.",
+    "privacy": "Public",
+    "value": 1073741824
+  },
+  "state_sync_config.storage_config.mmap_file_config.max_object_size": {
+    "description": "The maximum size of a single object in the file in bytes",
+    "privacy": "Public",
+    "value": 268435456
+  },
+  "state_sync_config.storage_config.mmap_file_config.max_size": {
+    "description": "The maximum size of a memory mapped file in bytes. Must be greater than growth_step.",
+    "privacy": "Public",
+    "value": 1099511627776
+  },
+  "state_sync_config.storage_config.scope": {
+    "description": "The categories of data saved in storage.",
+    "privacy": "Public",
+    "value": "FullArchive"
+  },
   "strk_fee_token_address": {
     "description": "A required param! Address of the STRK fee token.",
     "param_type": "String",
diff --git a/crates/starknet_integration_tests/Cargo.toml b/crates/starknet_integration_tests/Cargo.toml
index 64c4cdf8e3..f4f9b475e3 100644
--- a/crates/starknet_integration_tests/Cargo.toml
+++ b/crates/starknet_integration_tests/Cargo.toml
@@ -36,6 +36,7 @@ starknet_mempool_p2p.workspace = true
 starknet_monitoring_endpoint = { workspace = true, features = ["testing"] }
 starknet_sequencer_infra = { workspace = true, features = ["testing"] }
 starknet_sequencer_node = { workspace = true, features = ["testing"] }
+starknet_state_sync.workspace = true
 starknet_task_executor.workspace = true
 strum.workspace = true
 tempfile.workspace = true
diff --git a/crates/starknet_integration_tests/src/config_utils.rs b/crates/starknet_integration_tests/src/config_utils.rs
index bdab8d0c89..6e8a1edee7 100644
--- a/crates/starknet_integration_tests/src/config_utils.rs
+++ b/crates/starknet_integration_tests/src/config_utils.rs
@@ -52,6 +52,8 @@ pub(crate) fn dump_config_file_changes(
         config.http_server_config.ip,
         config.http_server_config.port,
         config.consensus_manager_config.consensus_config.start_height,
+        config.state_sync_config.storage_config.db_config.path_prefix,
+        config.state_sync_config.network_config.tcp_port,
     );
     let node_config_path = dump_json_data(json_data, NODE_CONFIG_CHANGES_FILE_PATH, dir);
     assert!(node_config_path.exists(), "File does not exist: {:?}", node_config_path);
diff --git a/crates/starknet_integration_tests/src/flow_test_setup.rs b/crates/starknet_integration_tests/src/flow_test_setup.rs
index 4aed6aca04..c34d177b37 100644
--- a/crates/starknet_integration_tests/src/flow_test_setup.rs
+++ b/crates/starknet_integration_tests/src/flow_test_setup.rs
@@ -28,6 +28,7 @@ pub struct FlowTestSetup {
     // Handlers for the storage files, maintained so the files are not deleted.
     pub batcher_storage_file_handle: TempDir,
     pub rpc_storage_file_handle: TempDir,
+    pub state_sync_storage_file_handle: TempDir,
 
     // Handle of the sequencer node.
     pub sequencer_node_handle: JoinHandle>,
@@ -56,9 +57,13 @@ impl FlowTestSetup {
             .await;
 
         // Derive the configuration for the sequencer node.
-        let (config, _required_params, consensus_proposals_channels) =
-            create_config(chain_info, rpc_server_addr, storage_for_test.batcher_storage_config)
-                .await;
+        let (config, _required_params, consensus_proposals_channels) = create_config(
+            chain_info,
+            rpc_server_addr,
+            storage_for_test.batcher_storage_config,
+            storage_for_test.state_sync_storage_config,
+        )
+        .await;
 
         let (_clients, servers) = create_node_modules(&config);
@@ -79,6 +84,7 @@ impl FlowTestSetup {
             add_tx_http_client,
             batcher_storage_file_handle: storage_for_test.batcher_storage_handle,
             rpc_storage_file_handle: storage_for_test.rpc_storage_handle,
+            state_sync_storage_file_handle: storage_for_test.state_sync_storage_handle,
             sequencer_node_handle,
             consensus_proposals_channels,
         }
diff --git a/crates/starknet_integration_tests/src/integration_test_setup.rs b/crates/starknet_integration_tests/src/integration_test_setup.rs
index 08d1f930d0..bb0f2b93e3 100644
--- a/crates/starknet_integration_tests/src/integration_test_setup.rs
+++ b/crates/starknet_integration_tests/src/integration_test_setup.rs
@@ -22,6 +22,8 @@ pub struct IntegrationTestSetup {
     pub node_config_path: PathBuf,
     // Storage reader for the batcher.
     pub batcher_storage_config: StorageConfig,
+    // Storage reader for the state sync.
+    pub state_sync_storage_config: StorageConfig,
     // Handlers for the storage and config files, maintained so the files are not deleted. Since
     // these are only maintained to avoid dropping the handlers, private visibility suffices, and
     // as such, the '#[allow(dead_code)]' attributes are used to suppress the warning.
@@ -31,6 +33,8 @@ pub struct IntegrationTestSetup {
     rpc_storage_handle: TempDir,
     #[allow(dead_code)]
     node_config_dir_handle: TempDir,
+    #[allow(dead_code)]
+    state_sync_storage_handle: TempDir,
 }
 
 impl IntegrationTestSetup {
@@ -47,9 +51,13 @@
             .await;
 
         // Derive the configuration for the sequencer node.
-        let (config, required_params, _) =
-            create_config(chain_info, rpc_server_addr, storage_for_test.batcher_storage_config)
-                .await;
+        let (config, required_params, _) = create_config(
+            chain_info,
+            rpc_server_addr,
+            storage_for_test.batcher_storage_config,
+            storage_for_test.state_sync_storage_config,
+        )
+        .await;
 
         let node_config_dir_handle = tempdir().unwrap();
         let node_config_path = dump_config_file_changes(
@@ -73,6 +81,8 @@ impl IntegrationTestSetup {
             rpc_storage_handle: storage_for_test.rpc_storage_handle,
             node_config_dir_handle,
             node_config_path,
+            state_sync_storage_handle: storage_for_test.state_sync_storage_handle,
+            state_sync_storage_config: config.state_sync_config.storage_config,
         }
     }
 }
diff --git a/crates/starknet_integration_tests/src/state_reader.rs b/crates/starknet_integration_tests/src/state_reader.rs
index 0dedd6055f..6c15efe350 100644
--- a/crates/starknet_integration_tests/src/state_reader.rs
+++ b/crates/starknet_integration_tests/src/state_reader.rs
@@ -51,10 +51,13 @@ type ContractClassesMap =
     (Vec<(ClassHash, DeprecatedContractClass)>, Vec<(ClassHash, CasmContractClass)>);
 
 pub struct StorageTestSetup {
+    // TODO: Decide if we want to keep the rpc and batcher storage instances.
     pub rpc_storage_reader: StorageReader,
     pub rpc_storage_handle: TempDir,
     pub batcher_storage_config: StorageConfig,
     pub batcher_storage_handle: TempDir,
+    pub state_sync_storage_config: StorageConfig,
+    pub state_sync_storage_handle: TempDir,
 }
 
 impl StorageTestSetup {
@@ -67,12 +70,23 @@
             .scope(StorageScope::StateOnly)
             .chain_id(chain_info.chain_id.clone())
             .build();
-        create_test_state(&mut batcher_storage_writer, chain_info, test_defined_accounts);
+        create_test_state(&mut batcher_storage_writer, chain_info, test_defined_accounts.clone());
+        let (
+            (_, mut state_sync_storage_writer),
+            state_sync_storage_config,
+            state_sync_storage_handle,
+        ) = TestStorageBuilder::default()
+            .scope(StorageScope::FullArchive)
+            .chain_id(chain_info.chain_id.clone())
+            .build();
+        create_test_state(&mut state_sync_storage_writer, chain_info, test_defined_accounts);
         Self {
             rpc_storage_reader,
             rpc_storage_handle: rpc_storage_file_handle,
             batcher_storage_config,
             batcher_storage_handle: batcher_storage_file_handle,
+            state_sync_storage_config,
+            state_sync_storage_handle,
         }
     }
 }
diff --git a/crates/starknet_integration_tests/src/utils.rs b/crates/starknet_integration_tests/src/utils.rs
index 2cbf630e05..d2cbda8418 100644
--- a/crates/starknet_integration_tests/src/utils.rs
+++ b/crates/starknet_integration_tests/src/utils.rs
@@ -9,6 +9,7 @@ use mempool_test_utils::starknet_api_test_utils::{AccountId, MultiAccountTransac
 use papyrus_consensus::config::ConsensusConfig;
 use papyrus_network::network_manager::test_utils::create_network_configs_connected_to_broadcast_channels;
 use papyrus_network::network_manager::BroadcastTopicChannels;
+use papyrus_network::NetworkConfig;
 use papyrus_protobuf::consensus::{ProposalPart, StreamMessage};
 use papyrus_storage::StorageConfig;
 use starknet_api::block::BlockNumber;
@@ -27,6 +28,7 @@ use starknet_http_server::config::HttpServerConfig;
 use starknet_sequencer_infra::test_utils::get_available_socket;
 use starknet_sequencer_node::config::node_config::SequencerNodeConfig;
 use starknet_sequencer_node::config::test_utils::RequiredParams;
+use starknet_state_sync::config::StateSyncConfig;
 
 pub fn create_chain_info() -> ChainInfo {
     let mut chain_info = ChainInfo::create_for_testing();
@@ -40,6 +42,7 @@ pub async fn create_config(
     chain_info: ChainInfo,
     rpc_server_addr: SocketAddr,
     batcher_storage_config: StorageConfig,
+    state_sync_storage_config: StorageConfig,
 ) -> (SequencerNodeConfig, RequiredParams, BroadcastTopicChannels<StreamMessage<ProposalPart>>) {
     let fee_token_addresses = chain_info.fee_token_addresses.clone();
     let batcher_config = create_batcher_config(batcher_storage_config, chain_info.clone());
@@ -49,6 +52,7 @@ pub async fn create_config(
     let (mut consensus_manager_configs, consensus_proposals_channels) =
         create_consensus_manager_configs_and_channels(1);
     let consensus_manager_config = consensus_manager_configs.pop().unwrap();
+    let state_sync_config = create_state_sync_config(state_sync_storage_config);
     (
         SequencerNodeConfig {
             batcher_config,
@@ -56,6 +60,7 @@ pub async fn create_config(
             gateway_config,
             http_server_config,
             rpc_state_reader_config,
+            state_sync_config,
             ..SequencerNodeConfig::default()
         },
         RequiredParams {
@@ -227,3 +232,15 @@ pub fn create_batcher_config(
         ..Default::default()
     }
 }
+
+pub fn create_state_sync_config(state_sync_storage_config: StorageConfig) -> StateSyncConfig {
+    const STATE_SYNC_NETWORK_CONFIG_TCP_PORT_FOR_TESTING: u16 = 12345;
+    StateSyncConfig {
+        storage_config: state_sync_storage_config,
+        network_config: NetworkConfig {
+            tcp_port: STATE_SYNC_NETWORK_CONFIG_TCP_PORT_FOR_TESTING,
+            ..Default::default()
+        },
+        ..Default::default()
+    }
+}
diff --git a/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs b/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs
index 277b601fe2..d635c55bdc 100644
--- a/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs
+++ b/crates/starknet_integration_tests/tests/mempool_p2p_flow_test.rs
@@ -17,6 +17,7 @@ use starknet_integration_tests::utils::{
     create_gateway_config,
     create_http_server_config,
     create_integration_test_tx_generator,
+    create_state_sync_config,
     run_integration_test_scenario,
     test_rpc_state_reader_config,
 };
@@ -76,6 +77,7 @@ async fn test_mempool_sends_tx_to_other_peer(mut tx_generator: MultiAccountTrans
     let gateway_config = create_gateway_config(chain_info).await;
     let http_server_config = create_http_server_config().await;
     let rpc_state_reader_config = test_rpc_state_reader_config(rpc_server_addr);
+    let state_sync_config = create_state_sync_config(storage_for_test.state_sync_storage_config);
     let (mut network_configs, mut broadcast_channels) =
         create_network_configs_connected_to_broadcast_channels::(
             1,
@@ -90,6 +92,7 @@ async fn test_mempool_sends_tx_to_other_peer(mut tx_generator: MultiAccountTrans
         http_server_config,
         rpc_state_reader_config,
         mempool_p2p_config,
+        state_sync_config,
         ..SequencerNodeConfig::default()
     };
diff --git a/crates/starknet_sequencer_node/Cargo.toml b/crates/starknet_sequencer_node/Cargo.toml
index 29aa72a10a..b85f1df975 100644
--- a/crates/starknet_sequencer_node/Cargo.toml
+++ b/crates/starknet_sequencer_node/Cargo.toml
@@ -35,6 +35,7 @@ starknet_mempool_types.workspace = true
 starknet_monitoring_endpoint.workspace = true
 starknet_sequencer_infra.workspace = true
 starknet_sierra_compile.workspace = true
+starknet_state_sync.workspace = true
 starknet_state_sync_types.workspace = true
 thiserror = { workspace = true, optional = true }
 tokio.workspace = true
diff --git a/crates/starknet_sequencer_node/src/components.rs b/crates/starknet_sequencer_node/src/components.rs
index 266f37f15e..6a78a507c4 100644
--- a/crates/starknet_sequencer_node/src/components.rs
+++ b/crates/starknet_sequencer_node/src/components.rs
@@ -10,6 +10,8 @@ use starknet_monitoring_endpoint::monitoring_endpoint::{
     create_monitoring_endpoint,
     MonitoringEndpoint,
 };
+use starknet_state_sync::runner::StateSyncRunner;
+use starknet_state_sync::{create_state_sync_and_runner, StateSync};
 
 use crate::clients::SequencerNodeClients;
 use crate::config::component_execution_config::ComponentExecutionMode;
@@ -25,6 +27,8 @@ pub struct SequencerNodeComponents {
     pub monitoring_endpoint: Option<MonitoringEndpoint>,
     pub mempool_p2p_propagator: Option<MempoolP2pPropagator>,
     pub mempool_p2p_runner: Option<MempoolP2pRunner>,
+    pub state_sync: Option<StateSync>,
+    pub state_sync_runner: Option<StateSyncRunner>,
 }
 
 pub fn create_node_components(
@@ -111,6 +115,16 @@
         ComponentExecutionMode::Disabled | ComponentExecutionMode::Remote => None,
     };
 
+    let (state_sync, state_sync_runner) = match config.components.state_sync.execution_mode {
+        ComponentExecutionMode::LocalExecutionWithRemoteDisabled
+        | ComponentExecutionMode::LocalExecutionWithRemoteEnabled => {
+            let (state_sync, state_sync_runner) =
+                create_state_sync_and_runner(config.state_sync_config.clone());
+            (Some(state_sync), Some(state_sync_runner))
+        }
+        ComponentExecutionMode::Disabled | ComponentExecutionMode::Remote => (None, None),
+    };
+
     SequencerNodeComponents {
         batcher,
         consensus_manager,
@@ -120,5 +134,7 @@
         monitoring_endpoint,
         mempool_p2p_propagator,
         mempool_p2p_runner,
+        state_sync,
+        state_sync_runner,
     }
 }
diff --git a/crates/starknet_sequencer_node/src/config/config_test.rs b/crates/starknet_sequencer_node/src/config/config_test.rs
index 1975e0447a..048c2f2d4f 100644
--- a/crates/starknet_sequencer_node/src/config/config_test.rs
+++ b/crates/starknet_sequencer_node/src/config/config_test.rs
@@ -1,6 +1,7 @@
 use std::collections::HashSet;
-use std::env;
 use std::fs::File;
+use std::path::{Path, PathBuf};
+use std::{env, fs};
 
 use assert_json_diff::assert_json_eq;
 use assert_matches::assert_matches;
@@ -68,8 +69,16 @@ fn test_default_config_file_is_up_to_date() {
     let from_default_config_file: serde_json::Value =
         serde_json::from_reader(File::open(DEFAULT_CONFIG_PATH).unwrap()).unwrap();
 
+    // The validate function will fail if the data directory does not exist.
+    // The data directory is the default directory path for the storage of the state sync config.
+    // We assert that the directory does not exist to prevent deleting an unrelated folder with the
+    // same path.
+    assert!(!Path::new("data").exists());
+    fs::create_dir_all("data").expect("Should make a temporary `data` directory");
+
     let default_config = SequencerNodeConfig::default();
     assert_matches!(default_config.validate(), Ok(()));
+    fs::remove_dir("data").unwrap();
 
     // Create a temporary file and dump the default config to it.
     let mut tmp_file_path = env::temp_dir();
@@ -105,8 +114,12 @@ fn test_config_parsing() {
     let required_params = RequiredParams::create_for_testing();
     let args = create_test_config_load_args(required_params);
     let config = SequencerNodeConfig::load_and_process(args);
-    let config = config.expect("Parsing function failed.");
+    let mut config = config.expect("Parsing function failed.");
 
+    // The validate function will fail if the data directory does not exist, so we change the path
+    // to point to an existing directory. The data directory is the default directory path for
+    // the storage of the state sync config.
+    config.state_sync_config.storage_config.db_config.path_prefix = PathBuf::from(".");
     let result = config_validate(&config);
     assert_matches!(result, Ok(_), "Expected Ok but got {:?}", result);
 }
diff --git a/crates/starknet_sequencer_node/src/config/node_config.rs b/crates/starknet_sequencer_node/src/config/node_config.rs
index e0b5112b4f..a0541f2776 100644
--- a/crates/starknet_sequencer_node/src/config/node_config.rs
+++ b/crates/starknet_sequencer_node/src/config/node_config.rs
@@ -26,6 +26,7 @@ use starknet_http_server::config::HttpServerConfig;
 use starknet_mempool_p2p::config::MempoolP2pConfig;
 use starknet_monitoring_endpoint::config::MonitoringEndpointConfig;
 use starknet_sierra_compile::config::SierraToCasmCompilationConfig;
+use starknet_state_sync::config::StateSyncConfig;
 use validator::Validate;
 
 use crate::config::component_config::ComponentConfig;
@@ -50,6 +51,8 @@ pub static CONFIG_POINTERS: LazyLock<ConfigPointers> = LazyLock::new(|| {
             "consensus_manager_config.consensus_config.network_config.chain_id",
             "gateway_config.chain_info.chain_id",
             "mempool_p2p_config.network_config.chain_id",
+            "state_sync_config.storage_config.db_config.chain_id",
+            "state_sync_config.network_config.chain_id",
         ]),
     ),
     (
@@ -114,6 +117,8 @@ pub struct SequencerNodeConfig {
     pub mempool_p2p_config: MempoolP2pConfig,
     #[validate]
     pub monitoring_endpoint_config: MonitoringEndpointConfig,
+    #[validate]
+    pub state_sync_config: StateSyncConfig,
 }
 
 impl SerializeConfig for SequencerNodeConfig {
@@ -134,6 +139,7 @@
                 self.monitoring_endpoint_config.dump(),
                 "monitoring_endpoint_config",
             ),
+            append_sub_config_name(self.state_sync_config.dump(), "state_sync_config"),
         ];
         sub_configs.into_iter().flatten().collect()
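
For orientation only (not part of the patch): a minimal sketch of how the new state sync wiring is meant to be consumed by test code, using only items visible in this diff — `create_state_sync_config` from `starknet_integration_tests::utils`, `papyrus_storage::StorageConfig`, and the new `state_sync_config` field on `SequencerNodeConfig`. The wrapper function name below is hypothetical.

```rust
use papyrus_storage::StorageConfig;
use starknet_integration_tests::utils::create_state_sync_config;
use starknet_sequencer_node::config::node_config::SequencerNodeConfig;

// Sketch: build a node config whose state sync component uses a dedicated
// storage directory (and the testing TCP port set inside the helper), while
// every other sub-config keeps its default, mirroring the updated tests.
fn sequencer_config_with_state_sync(state_sync_storage_config: StorageConfig) -> SequencerNodeConfig {
    let state_sync_config = create_state_sync_config(state_sync_storage_config);
    SequencerNodeConfig { state_sync_config, ..SequencerNodeConfig::default() }
}
```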