From 15e8c5b722d04053899164d98089d7fb0451e242 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Tue, 18 Jul 2023 10:49:55 +0200 Subject: [PATCH] add black formatted code, only using spaces, no more tabs https://github.com/psf/black/issues/47 --- main.star | 340 +++++++++----- src/cl_forkmon/cl_forkmon_launcher.star | 124 +++-- src/el_forkmon/el_forkmon_launcher.star | 85 ++-- src/grafana/grafana_launcher.star | 126 +++-- src/mev_boost/mev_boost_context.star | 12 +- src/mev_boost/mev_boost_launcher.star | 74 +-- src/mock_mev/mock_mev_launcher.star | 35 +- src/package_io/constants.star | 25 +- src/package_io/parse_input.star | 439 ++++++++++-------- src/prometheus/prometheus_launcher.star | 85 ++-- src/shared_utils/shared_utils.star | 50 +- src/static_files/static_files.star | 32 +- src/testnet_verifier/testnet_verifier.star | 111 +++-- .../transaction_spammer.star | 40 +- 14 files changed, 936 insertions(+), 642 deletions(-) diff --git a/main.star b/main.star index 8995d185d..89da7f77a 100644 --- a/main.star +++ b/main.star @@ -1,21 +1,45 @@ -parse_input = import_module("github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star") - -static_files = import_module("github.com/kurtosis-tech/eth2-package/src/static_files/static_files.star") -genesis_constants = import_module("github.com/kurtosis-tech/eth-network-package/src/prelaunch_data_generator/genesis_constants/genesis_constants.star") - -eth_network_module = import_module("github.com/kurtosis-tech/eth-network-package/main.star") -transaction_spammer = import_module("github.com/kurtosis-tech/eth2-package/src/transaction_spammer/transaction_spammer.star") -cl_forkmon = import_module("github.com/kurtosis-tech/eth2-package/src/cl_forkmon/cl_forkmon_launcher.star") -el_forkmon = import_module("github.com/kurtosis-tech/eth2-package/src/el_forkmon/el_forkmon_launcher.star") -prometheus = import_module("github.com/kurtosis-tech/eth2-package/src/prometheus/prometheus_launcher.star") -grafana =import_module("github.com/kurtosis-tech/eth2-package/src/grafana/grafana_launcher.star") -testnet_verifier = import_module("github.com/kurtosis-tech/eth2-package/src/testnet_verifier/testnet_verifier.star") -mev_boost_launcher_module = import_module("github.com/kurtosis-tech/eth2-package/src/mev_boost/mev_boost_launcher.star") -mock_mev_launcher_module = import_module("github.com/kurtosis-tech/eth2-package/src/mock_mev/mock_mev_launcher.star") - -GRAFANA_USER = "admin" -GRAFANA_PASSWORD = "admin" -GRAFANA_DASHBOARD_PATH_URL = "/d/QdTOwy-nz/eth2-merge-kurtosis-module-dashboard?orgId=1" +parse_input = import_module( + "github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star" +) + +static_files = import_module( + "github.com/kurtosis-tech/eth2-package/src/static_files/static_files.star" +) +genesis_constants = import_module( + "github.com/kurtosis-tech/eth-network-package/src/prelaunch_data_generator/genesis_constants/genesis_constants.star" +) + +eth_network_module = import_module( + "github.com/kurtosis-tech/eth-network-package/main.star" +) +transaction_spammer = import_module( + "github.com/kurtosis-tech/eth2-package/src/transaction_spammer/transaction_spammer.star" +) +cl_forkmon = import_module( + "github.com/kurtosis-tech/eth2-package/src/cl_forkmon/cl_forkmon_launcher.star" +) +el_forkmon = import_module( + "github.com/kurtosis-tech/eth2-package/src/el_forkmon/el_forkmon_launcher.star" +) +prometheus = import_module( + "github.com/kurtosis-tech/eth2-package/src/prometheus/prometheus_launcher.star" +) +grafana = import_module( + 
"github.com/kurtosis-tech/eth2-package/src/grafana/grafana_launcher.star" +) +testnet_verifier = import_module( + "github.com/kurtosis-tech/eth2-package/src/testnet_verifier/testnet_verifier.star" +) +mev_boost_launcher_module = import_module( + "github.com/kurtosis-tech/eth2-package/src/mev_boost/mev_boost_launcher.star" +) +mock_mev_launcher_module = import_module( + "github.com/kurtosis-tech/eth2-package/src/mock_mev/mock_mev_launcher.star" +) + +GRAFANA_USER = "admin" +GRAFANA_PASSWORD = "admin" +GRAFANA_DASHBOARD_PATH_URL = "/d/QdTOwy-nz/eth2-merge-kurtosis-module-dashboard?orgId=1" FIRST_NODE_FINALIZATION_FACT = "cl-boot-finalization-fact" HTTP_PORT_ID_FOR_FACT = "http" @@ -23,110 +47,178 @@ HTTP_PORT_ID_FOR_FACT = "http" MEV_BOOST_SHOULD_CHECK_RELAY = True MOCK_MEV_TYPE = "mock" + def run(plan, args): - args_with_right_defaults, args_with_defaults_dict = parse_input.parse_input(args) - - num_participants = len(args_with_right_defaults.participants) - network_params = args_with_right_defaults.network_params - - grafana_datasource_config_template = read_file(static_files.GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH) - grafana_dashboards_config_template = read_file(static_files.GRAFANA_DASHBOARD_PROVIDERS_CONFIG_TEMPLATE_FILEPATH) - prometheus_config_template = read_file(static_files.PROMETHEUS_CONFIG_TEMPLATE_FILEPATH) - - plan.print("Read the prometheus, grafana templates") - - plan.print("Launching participant network with {0} participants and the following network params {1}".format(num_participants, network_params)) - all_participants, cl_genesis_timestamp = eth_network_module.run(plan, args_with_defaults_dict) - - all_el_client_contexts = [] - all_cl_client_contexts = [] - for participant in all_participants: - all_el_client_contexts.append(participant.el_client_context) - all_cl_client_contexts.append(participant.cl_client_context) - - - mev_endpoints = [] - # passed external relays get priority - # perhaps add mev_type External or remove this - if hasattr(participant, "builder_network_params") and participant.builder_network_params != None: - mev_endpoints = participant.builder_network_params.relay_end_points - # otherwise dummy relays spinup if chosen - elif args_with_right_defaults.mev_type and args_with_right_defaults.mev_type == MOCK_MEV_TYPE: - el_uri = "{0}:{1}".format(all_el_client_contexts[0].ip_addr, all_el_client_contexts[0].engine_rpc_port_num) - beacon_uri = "{0}:{1}".format(all_cl_client_contexts[0].ip_addr, all_cl_client_contexts[0].http_port_num) - jwt_secret = all_el_client_contexts[0].jwt_secret - endpoint = mock_mev_launcher_module.launch_mock_mev(plan, el_uri, beacon_uri, jwt_secret) - mev_endpoints.append(endpoint) - - # spin up the mev boost contexts if some endpoints for relays have been passed - all_mevboost_contexts = [] - if mev_endpoints: - for index, participant in enumerate(args_with_right_defaults.participants): - mev_boost_launcher = mev_boost_launcher_module.new_mev_boost_launcher(MEV_BOOST_SHOULD_CHECK_RELAY, mev_endpoints) - mev_boost_service_name = "{0}{1}".format(parse_input.MEV_BOOST_SERVICE_NAME_PREFIX, index) - mev_boost_context = mev_boost_launcher_module.launch(plan, mev_boost_launcher, mev_boost_service_name, network_params.network_id) - all_mevboost_contexts.append(mev_boost_context) - - if not args_with_right_defaults.launch_additional_services: - return - - plan.print("Launching transaction spammer") - transaction_spammer.launch_transaction_spammer(plan, genesis_constants.PRE_FUNDED_ACCOUNTS, all_el_client_contexts[0]) - 
plan.print("Succesfully launched transaction spammer") - - # We need a way to do time.sleep - # TODO add code that waits for CL genesis - - plan.print("Launching cl forkmon") - cl_forkmon_config_template = read_file(static_files.CL_FORKMON_CONFIG_TEMPLATE_FILEPATH) - cl_forkmon.launch_cl_forkmon(plan, cl_forkmon_config_template, all_cl_client_contexts, cl_genesis_timestamp, network_params.seconds_per_slot, network_params.slots_per_epoch) - plan.print("Succesfully launched consensus layer forkmon") - - plan.print("Launching el forkmon") - el_forkmon_config_template = read_file(static_files.EL_FORKMON_CONFIG_TEMPLATE_FILEPATH) - el_forkmon.launch_el_forkmon(plan, el_forkmon_config_template, all_el_client_contexts) - plan.print("Succesfully launched execution layer forkmon") - - plan.print("Launching prometheus...") - prometheus_private_url = prometheus.launch_prometheus( - plan, - prometheus_config_template, - all_cl_client_contexts, - ) - plan.print("Successfully launched Prometheus") - - plan.print("Launching grafana...") - grafana.launch_grafana(plan, grafana_datasource_config_template, grafana_dashboards_config_template, prometheus_private_url) - plan.print("Succesfully launched grafana") - - if args_with_right_defaults.wait_for_verifications: - plan.print("Running synchrnous testnet verifier") - testnet_verifier.run_synchronous_testnet_verification(plan, args_with_right_defaults, all_el_client_contexts, all_cl_client_contexts) - plan.print("Verification succeeded") - else: - plan.print("Running asynchronous verification") - testnet_verifier.launch_testnet_verifier(plan, args_with_right_defaults, all_el_client_contexts, all_cl_client_contexts) - plan.print("Succesfully launched asynchronous verifier") - if args_with_right_defaults.wait_for_finalization: - plan.print("Waiting for the first finalized epoch") - first_cl_client = all_cl_client_contexts[0] - first_client_beacon_name = first_cl_client.beacon_service_name - epoch_recipe = GetHttpRequestRecipe( - endpoint = "/eth/v1/beacon/states/head/finality_checkpoints", - port_id = HTTP_PORT_ID_FOR_FACT, - extract = { - "finalized_epoch": ".data.finalized.epoch" - } - ) - plan.wait(recipe = epoch_recipe, field = "extract.finalized_epoch", assertion = "!=", target_value = "0", timeout = "40m", service_name = first_client_beacon_name) - plan.print("First finalized epoch occurred successfully") - - - grafana_info = struct( - dashboard_path = GRAFANA_DASHBOARD_PATH_URL, - user = GRAFANA_USER, - password = GRAFANA_PASSWORD - ) - output = struct(grafana_info = grafana_info) - - return output + args_with_right_defaults, args_with_defaults_dict = parse_input.parse_input(args) + + num_participants = len(args_with_right_defaults.participants) + network_params = args_with_right_defaults.network_params + + grafana_datasource_config_template = read_file( + static_files.GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH + ) + grafana_dashboards_config_template = read_file( + static_files.GRAFANA_DASHBOARD_PROVIDERS_CONFIG_TEMPLATE_FILEPATH + ) + prometheus_config_template = read_file( + static_files.PROMETHEUS_CONFIG_TEMPLATE_FILEPATH + ) + + plan.print("Read the prometheus, grafana templates") + + plan.print( + "Launching participant network with {0} participants and the following network params {1}".format( + num_participants, network_params + ) + ) + all_participants, cl_genesis_timestamp = eth_network_module.run( + plan, args_with_defaults_dict + ) + + all_el_client_contexts = [] + all_cl_client_contexts = [] + for participant in all_participants: + 
all_el_client_contexts.append(participant.el_client_context) + all_cl_client_contexts.append(participant.cl_client_context) + + mev_endpoints = [] + # passed external relays get priority + # perhaps add mev_type External or remove this + if ( + hasattr(participant, "builder_network_params") + and participant.builder_network_params != None + ): + mev_endpoints = participant.builder_network_params.relay_end_points + # otherwise dummy relays spinup if chosen + elif ( + args_with_right_defaults.mev_type + and args_with_right_defaults.mev_type == MOCK_MEV_TYPE + ): + el_uri = "{0}:{1}".format( + all_el_client_contexts[0].ip_addr, + all_el_client_contexts[0].engine_rpc_port_num, + ) + beacon_uri = "{0}:{1}".format( + all_cl_client_contexts[0].ip_addr, all_cl_client_contexts[0].http_port_num + ) + jwt_secret = all_el_client_contexts[0].jwt_secret + endpoint = mock_mev_launcher_module.launch_mock_mev( + plan, el_uri, beacon_uri, jwt_secret + ) + mev_endpoints.append(endpoint) + + # spin up the mev boost contexts if some endpoints for relays have been passed + all_mevboost_contexts = [] + if mev_endpoints: + for index, participant in enumerate(args_with_right_defaults.participants): + mev_boost_launcher = mev_boost_launcher_module.new_mev_boost_launcher( + MEV_BOOST_SHOULD_CHECK_RELAY, mev_endpoints + ) + mev_boost_service_name = "{0}{1}".format( + parse_input.MEV_BOOST_SERVICE_NAME_PREFIX, index + ) + mev_boost_context = mev_boost_launcher_module.launch( + plan, + mev_boost_launcher, + mev_boost_service_name, + network_params.network_id, + ) + all_mevboost_contexts.append(mev_boost_context) + + if not args_with_right_defaults.launch_additional_services: + return + + plan.print("Launching transaction spammer") + transaction_spammer.launch_transaction_spammer( + plan, genesis_constants.PRE_FUNDED_ACCOUNTS, all_el_client_contexts[0] + ) + plan.print("Successfully launched transaction spammer") + + # We need a way to do time.sleep + # TODO add code that waits for CL genesis + + plan.print("Launching cl forkmon") + cl_forkmon_config_template = read_file( + static_files.CL_FORKMON_CONFIG_TEMPLATE_FILEPATH + ) + cl_forkmon.launch_cl_forkmon( + plan, + cl_forkmon_config_template, + all_cl_client_contexts, + cl_genesis_timestamp, + network_params.seconds_per_slot, + network_params.slots_per_epoch, + ) + plan.print("Successfully launched consensus layer forkmon") + + plan.print("Launching el forkmon") + el_forkmon_config_template = read_file( + static_files.EL_FORKMON_CONFIG_TEMPLATE_FILEPATH + ) + el_forkmon.launch_el_forkmon( + plan, el_forkmon_config_template, all_el_client_contexts + ) + plan.print("Successfully launched execution layer forkmon") + + plan.print("Launching prometheus...") + prometheus_private_url = prometheus.launch_prometheus( + plan, + prometheus_config_template, + all_cl_client_contexts, + ) + plan.print("Successfully launched Prometheus") + + plan.print("Launching grafana...") + grafana.launch_grafana( + plan, + grafana_datasource_config_template, + grafana_dashboards_config_template, + prometheus_private_url, + ) + plan.print("Successfully launched grafana") + + if args_with_right_defaults.wait_for_verifications: + plan.print("Running synchronous testnet verifier") + testnet_verifier.run_synchronous_testnet_verification( + plan, + args_with_right_defaults, + all_el_client_contexts, + all_cl_client_contexts, + ) + plan.print("Verification succeeded") + else: + plan.print("Running asynchronous verification") + testnet_verifier.launch_testnet_verifier( + plan, + args_with_right_defaults, + 
all_el_client_contexts, + all_cl_client_contexts, + ) + plan.print("Successfully launched asynchronous verifier") + if args_with_right_defaults.wait_for_finalization: + plan.print("Waiting for the first finalized epoch") + first_cl_client = all_cl_client_contexts[0] + first_client_beacon_name = first_cl_client.beacon_service_name + epoch_recipe = GetHttpRequestRecipe( + endpoint="/eth/v1/beacon/states/head/finality_checkpoints", + port_id=HTTP_PORT_ID_FOR_FACT, + extract={"finalized_epoch": ".data.finalized.epoch"}, + ) + plan.wait( + recipe=epoch_recipe, + field="extract.finalized_epoch", + assertion="!=", + target_value="0", + timeout="40m", + service_name=first_client_beacon_name, + ) + plan.print("First finalized epoch occurred successfully") + + grafana_info = struct( + dashboard_path=GRAFANA_DASHBOARD_PATH_URL, + user=GRAFANA_USER, + password=GRAFANA_PASSWORD, + ) + output = struct(grafana_info=grafana_info) + + return output diff --git a/src/cl_forkmon/cl_forkmon_launcher.star b/src/cl_forkmon/cl_forkmon_launcher.star index d576777ed..a9c5f855f 100644 --- a/src/cl_forkmon/cl_forkmon_launcher.star +++ b/src/cl_forkmon/cl_forkmon_launcher.star @@ -1,72 +1,94 @@ -shared_utils = import_module("github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star") +shared_utils = import_module( + "github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star" +) SERVICE_NAME = "cl-forkmon" IMAGE_NAME = "ralexstokes/ethereum_consensus_monitor:latest" -HTTP_PORT_ID = "http" -HTTP_PORT_NUMBER = 80 +HTTP_PORT_ID = "http" +HTTP_PORT_NUMBER = 80 CL_FORKMON_CONFIG_FILENAME = "cl-forkmon-config.toml" CL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE = "/config" USED_PORTS = { - HTTP_PORT_ID:shared_utils.new_port_spec(HTTP_PORT_NUMBER, shared_utils.TCP_PROTOCOL, shared_utils.HTTP_APPLICATION_PROTOCOL) + HTTP_PORT_ID: shared_utils.new_port_spec( + HTTP_PORT_NUMBER, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) } def launch_cl_forkmon( - plan, - config_template, - cl_client_contexts, - genesis_unix_timestamp, - seconds_per_slot, - slots_per_epoch - ): - - all_cl_client_info = [] - for client in cl_client_contexts: - client_info = new_cl_client_info(client.ip_addr, client.http_port_num) - all_cl_client_info.append(client_info) - - template_data = new_config_template_data(HTTP_PORT_NUMBER, all_cl_client_info, seconds_per_slot, slots_per_epoch, genesis_unix_timestamp) - - template_and_data = shared_utils.new_template_and_data(config_template, template_data) - template_and_data_by_rel_dest_filepath = {} - template_and_data_by_rel_dest_filepath[CL_FORKMON_CONFIG_FILENAME] = template_and_data - - config_files_artifact_name = plan.render_templates(template_and_data_by_rel_dest_filepath, "cl-forkmon-config") - - config = get_config(config_files_artifact_name) - - plan.add_service(SERVICE_NAME, config) + plan, + config_template, + cl_client_contexts, + genesis_unix_timestamp, + seconds_per_slot, + slots_per_epoch, +): + all_cl_client_info = [] + for client in cl_client_contexts: + client_info = new_cl_client_info(client.ip_addr, client.http_port_num) + all_cl_client_info.append(client_info) + + template_data = new_config_template_data( + HTTP_PORT_NUMBER, + all_cl_client_info, + seconds_per_slot, + slots_per_epoch, + genesis_unix_timestamp, + ) + + template_and_data = shared_utils.new_template_and_data( + config_template, template_data + ) + template_and_data_by_rel_dest_filepath = {} + template_and_data_by_rel_dest_filepath[ + CL_FORKMON_CONFIG_FILENAME + ] = 
template_and_data + + config_files_artifact_name = plan.render_templates( + template_and_data_by_rel_dest_filepath, "cl-forkmon-config" + ) + + config = get_config(config_files_artifact_name) + + plan.add_service(SERVICE_NAME, config) def get_config(config_files_artifact_name): - config_file_path = shared_utils.path_join(CL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, CL_FORKMON_CONFIG_FILENAME) - return ServiceConfig( - image = IMAGE_NAME, - ports = USED_PORTS, - files = { - CL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name, - }, - cmd = ["--config-path", config_file_path] - ) - - -def new_config_template_data(listen_port_num, cl_client_info, seconds_per_slot, slots_per_epoch, genesis_unix_timestamp): - return { - "ListenPortNum": listen_port_num, - "CLClientInfo": cl_client_info, - "SecondsPerSlot": seconds_per_slot, - "SlotsPerEpoch": slots_per_epoch, - "GenesisUnixTimestamp": genesis_unix_timestamp, - } + config_file_path = shared_utils.path_join( + CL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, CL_FORKMON_CONFIG_FILENAME + ) + return ServiceConfig( + image=IMAGE_NAME, + ports=USED_PORTS, + files={ + CL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name, + }, + cmd=["--config-path", config_file_path], + ) + + +def new_config_template_data( + listen_port_num, + cl_client_info, + seconds_per_slot, + slots_per_epoch, + genesis_unix_timestamp, +): + return { + "ListenPortNum": listen_port_num, + "CLClientInfo": cl_client_info, + "SecondsPerSlot": seconds_per_slot, + "SlotsPerEpoch": slots_per_epoch, + "GenesisUnixTimestamp": genesis_unix_timestamp, + } def new_cl_client_info(ip_addr, port_num): - return { - "IPAddr": ip_addr, - "PortNum": port_num - } + return {"IPAddr": ip_addr, "PortNum": port_num} diff --git a/src/el_forkmon/el_forkmon_launcher.star b/src/el_forkmon/el_forkmon_launcher.star index 1c07425c3..2d51660a0 100644 --- a/src/el_forkmon/el_forkmon_launcher.star +++ b/src/el_forkmon/el_forkmon_launcher.star @@ -1,10 +1,12 @@ -shared_utils = import_module("github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star") +shared_utils = import_module( + "github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star" +) SERVICE_NAME = "el-forkmon" IMAGE_NAME = "skylenet/nodemonitor:darkmode" -HTTP_PORT_ID = "http" +HTTP_PORT_ID = "http" HTTP_PORT_NUMBER = 8080 EL_FORKMON_CONFIG_FILENAME = "el-forkmon-config.toml" @@ -12,56 +14,65 @@ EL_FORKMON_CONFIG_FILENAME = "el-forkmon-config.toml" EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE = "/config" USED_PORTS = { - HTTP_PORT_ID:shared_utils.new_port_spec(HTTP_PORT_NUMBER, shared_utils.TCP_PROTOCOL, shared_utils.HTTP_APPLICATION_PROTOCOL) + HTTP_PORT_ID: shared_utils.new_port_spec( + HTTP_PORT_NUMBER, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) } def launch_el_forkmon( - plan, - config_template, - el_client_contexts, - ): + plan, + config_template, + el_client_contexts, +): + all_el_client_info = [] + for client in el_client_contexts: + client_info = new_el_client_info( + client.ip_addr, client.rpc_port_num, client.service_name + ) + all_el_client_info.append(client_info) - all_el_client_info = [] - for client in el_client_contexts: - client_info = new_el_client_info(client.ip_addr, client.rpc_port_num, client.service_name) - all_el_client_info.append(client_info) + template_data = new_config_template_data(HTTP_PORT_NUMBER, all_el_client_info) - template_data = new_config_template_data(HTTP_PORT_NUMBER, all_el_client_info) + template_and_data = 
shared_utils.new_template_and_data( + config_template, template_data + ) + template_and_data_by_rel_dest_filepath = {} + template_and_data_by_rel_dest_filepath[ + EL_FORKMON_CONFIG_FILENAME + ] = template_and_data - template_and_data = shared_utils.new_template_and_data(config_template, template_data) - template_and_data_by_rel_dest_filepath = {} - template_and_data_by_rel_dest_filepath[EL_FORKMON_CONFIG_FILENAME] = template_and_data + config_files_artifact_name = plan.render_templates( + template_and_data_by_rel_dest_filepath, "el-forkmon-config" + ) - config_files_artifact_name = plan.render_templates(template_and_data_by_rel_dest_filepath, "el-forkmon-config") + config = get_config(config_files_artifact_name) - config = get_config(config_files_artifact_name) - - plan.add_service(SERVICE_NAME, config) + plan.add_service(SERVICE_NAME, config) def get_config(config_files_artifact_name): - config_file_path = shared_utils.path_join(EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, EL_FORKMON_CONFIG_FILENAME) - return ServiceConfig( - image = IMAGE_NAME, - ports = USED_PORTS, - files = { - EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name, - }, - cmd = [config_file_path] - ) + config_file_path = shared_utils.path_join( + EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, EL_FORKMON_CONFIG_FILENAME + ) + return ServiceConfig( + image=IMAGE_NAME, + ports=USED_PORTS, + files={ + EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name, + }, + cmd=[config_file_path], + ) def new_config_template_data(listen_port_num, el_client_info): - return { - "ListenPortNum": listen_port_num, - "ELClientInfo": el_client_info, - } + return { + "ListenPortNum": listen_port_num, + "ELClientInfo": el_client_info, + } def new_el_client_info(ip_addr, port_num, service_name): - return { - "IPAddr": ip_addr, - "PortNum": port_num, - "Name": service_name - } + return {"IPAddr": ip_addr, "PortNum": port_num, "Name": service_name} diff --git a/src/grafana/grafana_launcher.star b/src/grafana/grafana_launcher.star index 2d9eb15e7..005e8c0c6 100644 --- a/src/grafana/grafana_launcher.star +++ b/src/grafana/grafana_launcher.star @@ -1,5 +1,9 @@ -shared_utils = import_module("github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star") -static_files = import_module("github.com/kurtosis-tech/eth2-package/src/static_files/static_files.star") +shared_utils = import_module( + "github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star" +) +static_files = import_module( + "github.com/kurtosis-tech/eth2-package/src/static_files/static_files.star" +) SERVICE_NAME = "grafana" @@ -17,59 +21,95 @@ CONFIG_DIRPATH_ENV_VAR = "GF_PATHS_PROVISIONING" GRAFANA_CONFIG_DIRPATH_ON_SERVICE = "/config" GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE = "/dashboards" -GRAFANA_DASHBOARDS_FILEPATH_ON_SERVICE = GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE + "/dashboard.json" +GRAFANA_DASHBOARDS_FILEPATH_ON_SERVICE = ( + GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE + "/dashboard.json" +) USED_PORTS = { - HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUMBER_UINT16, shared_utils.TCP_PROTOCOL, shared_utils.HTTP_APPLICATION_PROTOCOL) + HTTP_PORT_ID: shared_utils.new_port_spec( + HTTP_PORT_NUMBER_UINT16, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) } -def launch_grafana(plan, datasource_config_template, dashboard_providers_config_template, prometheus_private_url): - grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid = get_grafana_config_dir_artifact_uuid(plan, 
datasource_config_template, dashboard_providers_config_template, prometheus_private_url) - - config = get_config(grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid) - - plan.add_service(SERVICE_NAME, config) - - -def get_grafana_config_dir_artifact_uuid(plan, datasource_config_template, dashboard_providers_config_template, prometheus_private_url): - datasource_data = new_datasource_config_template_data(prometheus_private_url) - datasource_template_and_data = shared_utils.new_template_and_data(datasource_config_template, datasource_data) - - dashboard_providers_data = new_dashboard_providers_config_template_data(GRAFANA_DASHBOARDS_FILEPATH_ON_SERVICE) - dashboard_providers_template_and_data = shared_utils.new_template_and_data(dashboard_providers_config_template, dashboard_providers_data) - - template_and_data_by_rel_dest_filepath = {} - template_and_data_by_rel_dest_filepath[DATASOURCE_CONFIG_REL_FILEPATH] = datasource_template_and_data - template_and_data_by_rel_dest_filepath[DASHBOARD_PROVIDERS_CONFIG_REL_FILEPATH] = dashboard_providers_template_and_data - - grafana_config_artifacts_name = plan.render_templates(template_and_data_by_rel_dest_filepath, name="grafana-config") - - grafana_dashboards_artifacts_name = plan.upload_files(static_files.GRAFANA_DASHBOARDS_CONFIG_DIRPATH, name="grafana-dashboards") - - return grafana_config_artifacts_name, grafana_dashboards_artifacts_name +def launch_grafana( + plan, + datasource_config_template, + dashboard_providers_config_template, + prometheus_private_url, +): + ( + grafana_config_artifacts_uuid, + grafana_dashboards_artifacts_uuid, + ) = get_grafana_config_dir_artifact_uuid( + plan, + datasource_config_template, + dashboard_providers_config_template, + prometheus_private_url, + ) + + config = get_config( + grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid + ) + + plan.add_service(SERVICE_NAME, config) + + +def get_grafana_config_dir_artifact_uuid( + plan, + datasource_config_template, + dashboard_providers_config_template, + prometheus_private_url, +): + datasource_data = new_datasource_config_template_data(prometheus_private_url) + datasource_template_and_data = shared_utils.new_template_and_data( + datasource_config_template, datasource_data + ) + + dashboard_providers_data = new_dashboard_providers_config_template_data( + GRAFANA_DASHBOARDS_FILEPATH_ON_SERVICE + ) + dashboard_providers_template_and_data = shared_utils.new_template_and_data( + dashboard_providers_config_template, dashboard_providers_data + ) + + template_and_data_by_rel_dest_filepath = {} + template_and_data_by_rel_dest_filepath[ + DATASOURCE_CONFIG_REL_FILEPATH + ] = datasource_template_and_data + template_and_data_by_rel_dest_filepath[ + DASHBOARD_PROVIDERS_CONFIG_REL_FILEPATH + ] = dashboard_providers_template_and_data + + grafana_config_artifacts_name = plan.render_templates( + template_and_data_by_rel_dest_filepath, name="grafana-config" + ) + + grafana_dashboards_artifacts_name = plan.upload_files( + static_files.GRAFANA_DASHBOARDS_CONFIG_DIRPATH, name="grafana-dashboards" + ) + + return grafana_config_artifacts_name, grafana_dashboards_artifacts_name def get_config(grafana_config_artifacts_name, grafana_dashboards_artifacts_name): - return ServiceConfig( - image = IMAGE_NAME, - ports = USED_PORTS, - env_vars = {CONFIG_DIRPATH_ENV_VAR: GRAFANA_CONFIG_DIRPATH_ON_SERVICE}, - files = { - GRAFANA_CONFIG_DIRPATH_ON_SERVICE: grafana_config_artifacts_name, - GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE: grafana_dashboards_artifacts_name - } - ) + return 
ServiceConfig( + image=IMAGE_NAME, + ports=USED_PORTS, + env_vars={CONFIG_DIRPATH_ENV_VAR: GRAFANA_CONFIG_DIRPATH_ON_SERVICE}, + files={ + GRAFANA_CONFIG_DIRPATH_ON_SERVICE: grafana_config_artifacts_name, + GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE: grafana_dashboards_artifacts_name, + }, + ) def new_datasource_config_template_data(prometheus_url): - return { - "PrometheusURL": prometheus_url - } + return {"PrometheusURL": prometheus_url} def new_dashboard_providers_config_template_data(dashboards_dirpath): - return { - "DashboardsDirpath": dashboards_dirpath - } + return {"DashboardsDirpath": dashboards_dirpath} diff --git a/src/mev_boost/mev_boost_context.star b/src/mev_boost/mev_boost_context.star index a245660f3..1d6cb6c1d 100644 --- a/src/mev_boost/mev_boost_context.star +++ b/src/mev_boost/mev_boost_context.star @@ -1,9 +1,11 @@ def new_mev_boost_context(private_ip_address, port): - return struct( - private_ip_address = private_ip_address, - port = port, - ) + return struct( + private_ip_address=private_ip_address, + port=port, + ) def mev_boost_endpoint(mev_boost_context): - return "http://{0}:{1}".format(mev_boost_context.private_ip_address, mev_boost_context.port) + return "http://{0}:{1}".format( + mev_boost_context.private_ip_address, mev_boost_context.port + ) diff --git a/src/mev_boost/mev_boost_launcher.star b/src/mev_boost/mev_boost_launcher.star index ffc89af78..91a9db9ce 100644 --- a/src/mev_boost/mev_boost_launcher.star +++ b/src/mev_boost/mev_boost_launcher.star @@ -1,49 +1,63 @@ -shared_utils = import_module("github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star") -mev_boost_context_module = import_module("github.com/kurtosis-tech/eth2-package/src/mev_boost/mev_boost_context.star") -parse_input = import_module("github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star") +shared_utils = import_module( + "github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star" +) +mev_boost_context_module = import_module( + "github.com/kurtosis-tech/eth2-package/src/mev_boost/mev_boost_context.star" +) +parse_input = import_module( + "github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star" +) FLASHBOTS_MEV_BOOST_IMAGE = "flashbots/mev-boost" FLASHBOTS_MEV_BOOST_PROTOCOL = "TCP" USED_PORTS = { - "api": shared_utils.new_port_spec(parse_input.FLASHBOTS_MEV_BOOST_PORT, FLASHBOTS_MEV_BOOST_PROTOCOL, wait="5s") + "api": shared_utils.new_port_spec( + parse_input.FLASHBOTS_MEV_BOOST_PORT, FLASHBOTS_MEV_BOOST_PROTOCOL, wait="5s" + ) } NETWORK_ID_TO_NAME = { - "5": "goerli", - "11155111": "sepolia", - "3": "ropsten", + "5": "goerli", + "11155111": "sepolia", + "3": "ropsten", } + def launch(plan, mev_boost_launcher, service_name, network_id): - config = get_config(mev_boost_launcher, network_id) + config = get_config(mev_boost_launcher, network_id) - mev_boost_service = plan.add_service(service_name, config) + mev_boost_service = plan.add_service(service_name, config) - return mev_boost_context_module.new_mev_boost_context(mev_boost_service.ip_address, parse_input.FLASHBOTS_MEV_BOOST_PORT) + return mev_boost_context_module.new_mev_boost_context( + mev_boost_service.ip_address, parse_input.FLASHBOTS_MEV_BOOST_PORT + ) def get_config(mev_boost_launcher, network_id): - command = ["mev-boost"] - - if mev_boost_launcher.should_check_relay: - command.append("-relay-check") - - return ServiceConfig( - image = FLASHBOTS_MEV_BOOST_IMAGE, - ports = USED_PORTS, - cmd = command, - env_vars = { - # TODO remove the hardcoding - # This is set to match 
this file https://github.com/kurtosis-tech/eth-network-package/blob/main/static_files/genesis-generation-config/cl/config.yaml.tmpl#L11 - "GENESIS_FORK_VERSION": "0x10000038", - "BOOST_LISTEN_ADDR": "0.0.0.0:{0}".format(parse_input.FLASHBOTS_MEV_BOOST_PORT), - "SKIP_RELAY_SIGNATURE_CHECK": "true", - "RELAYS": mev_boost_launcher.relay_end_points[0] - } - ) + command = ["mev-boost"] + + if mev_boost_launcher.should_check_relay: + command.append("-relay-check") + + return ServiceConfig( + image=FLASHBOTS_MEV_BOOST_IMAGE, + ports=USED_PORTS, + cmd=command, + env_vars={ + # TODO remove the hardcoding + # This is set to match this file https://github.com/kurtosis-tech/eth-network-package/blob/main/static_files/genesis-generation-config/cl/config.yaml.tmpl#L11 + "GENESIS_FORK_VERSION": "0x10000038", + "BOOST_LISTEN_ADDR": "0.0.0.0:{0}".format( + parse_input.FLASHBOTS_MEV_BOOST_PORT + ), + "SKIP_RELAY_SIGNATURE_CHECK": "true", + "RELAYS": mev_boost_launcher.relay_end_points[0], + }, + ) def new_mev_boost_launcher(should_check_relay, relay_end_points): - return struct(should_check_relay=should_check_relay, relay_end_points=relay_end_points) - + return struct( + should_check_relay=should_check_relay, relay_end_points=relay_end_points + ) diff --git a/src/mock_mev/mock_mev_launcher.star b/src/mock_mev/mock_mev_launcher.star index 9b4b44fbd..db16572c5 100644 --- a/src/mock_mev/mock_mev_launcher.star +++ b/src/mock_mev/mock_mev_launcher.star @@ -3,19 +3,24 @@ MOCK_MEV_SERVICE_NAME = "mock-mev" MOCK_MEV_BUILDER_PORT = 18550 DUMMY_PUB_KEY_THAT_ISNT_VERIFIED = "0xae1c2ca7bbd6f415a5aa5bb4079caf0a5c273104be5fb5e40e2b5a2f080b2f5bd945336f2a9e8ba346299cb65b0f84c8" + def launch_mock_mev(plan, el_uri, beacon_uri, jwt_secret): - mock_builder = plan.add_service( - name = MOCK_MEV_SERVICE_NAME, - config = ServiceConfig( - image = MOCK_MEV_IMAGE, - ports = { - "rest": PortSpec(number = MOCK_MEV_BUILDER_PORT, transport_protocol="TCP"), - }, - cmd = [ - "--jwt-secret={0}".format(jwt_secret), - "--el={0}".format(el_uri), - "--cl={0}".format(beacon_uri) - ] - ) - ) - return "http://{0}@{1}:{2}".format(DUMMY_PUB_KEY_THAT_ISNT_VERIFIED, mock_builder.ip_address, MOCK_MEV_BUILDER_PORT) + mock_builder = plan.add_service( + name=MOCK_MEV_SERVICE_NAME, + config=ServiceConfig( + image=MOCK_MEV_IMAGE, + ports={ + "rest": PortSpec( + number=MOCK_MEV_BUILDER_PORT, transport_protocol="TCP" + ), + }, + cmd=[ + "--jwt-secret={0}".format(jwt_secret), + "--el={0}".format(el_uri), + "--cl={0}".format(beacon_uri), + ], + ), + ) + return "http://{0}@{1}:{2}".format( + DUMMY_PUB_KEY_THAT_ISNT_VERIFIED, mock_builder.ip_address, MOCK_MEV_BUILDER_PORT + ) diff --git a/src/package_io/constants.star b/src/package_io/constants.star index ff73b9ef7..8559cb19e 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -1,22 +1,19 @@ EL_CLIENT_TYPE = struct( - geth="geth", - erigon="erigon", - nethermind="nethermind", - besu="besu" + geth="geth", erigon="erigon", nethermind="nethermind", besu="besu" ) CL_CLIENT_TYPE = struct( - lighthouse="lighthouse", - teku="teku", - nimbus="nimbus", - prysm="prysm", - lodestar="lodestar" + lighthouse="lighthouse", + teku="teku", + nimbus="nimbus", + prysm="prysm", + lodestar="lodestar", ) GLOBAL_CLIENT_LOG_LEVEL = struct( - info="info", - error="error", - warn="warn", - debug="debug", - trace="trace", + info="info", + error="error", + warn="warn", + debug="debug", + trace="trace", ) diff --git a/src/package_io/parse_input.star b/src/package_io/parse_input.star index 14927e4b2..5f35bb015 
100644 --- a/src/package_io/parse_input.star +++ b/src/package_io/parse_input.star @@ -1,16 +1,16 @@ DEFAULT_EL_IMAGES = { - "geth": "ethereum/client-go:latest", - "erigon": "thorax/erigon:devel", - "nethermind": "nethermind/nethermind:latest", - "besu": "hyperledger/besu:develop" + "geth": "ethereum/client-go:latest", + "erigon": "thorax/erigon:devel", + "nethermind": "nethermind/nethermind:latest", + "besu": "hyperledger/besu:develop", } DEFAULT_CL_IMAGES = { - "lighthouse": "sigp/lighthouse:latest", - "teku": "consensys/teku:latest", - "nimbus": "statusim/nimbus-eth2:multiarch-latest", - "prysm": "prysmaticlabs/prysm-beacon-chain:latest,prysmaticlabs/prysm-validator:latest", - "lodestar": "chainsafe/lodestar:latest", + "lighthouse": "sigp/lighthouse:latest", + "teku": "consensys/teku:latest", + "nimbus": "statusim/nimbus-eth2:multiarch-latest", + "prysm": "prysmaticlabs/prysm-beacon-chain:latest,prysmaticlabs/prysm-validator:latest", + "lodestar": "chainsafe/lodestar:latest", } BESU_NODE_NAME = "besu" @@ -25,195 +25,252 @@ MEV_BOOST_SERVICE_NAME_PREFIX = "mev-boost-" def parse_input(input_args): - result = default_input_args() - for attr in input_args: - value = input_args[attr] - # if its insterted we use the value inserted - if attr not in ATTR_TO_BE_SKIPPED_AT_ROOT and attr in input_args: - result[attr] = value - elif attr == "network_params": - for sub_attr in input_args["network_params"]: - sub_value = input_args["network_params"][sub_attr] - result["network_params"][sub_attr] = sub_value - elif attr == "participants": - participants = [] - for participant in input_args["participants"]: - new_participant = default_participant() - for sub_attr, sub_value in participant.items(): - # if the value is set in input we set it in participant - new_participant[sub_attr] = sub_value - participants.append(new_participant) - result["participants"] = participants - - # validation of the above defaults - for index, participant in enumerate(result["participants"]): - el_client_type = participant["el_client_type"] - cl_client_type = participant["cl_client_type"] - - if index == 0 and el_client_type in (BESU_NODE_NAME, NETHERMIND_NODE_NAME): - fail("besu/nethermind cant be the first participant") - - if cl_client_type in (NIMBUS_NODE_NAME) and (result["network_params"]["seconds_per_slot"] < 12): - fail("nimbus can't be run with slot times below 12 seconds") - - el_image = participant["el_client_image"] - if el_image == "": - default_image = DEFAULT_EL_IMAGES.get(el_client_type, "") - if default_image == "": - fail("{0} received an empty image name and we don't have a default for it".format(el_client_type)) - participant["el_client_image"] = default_image - - cl_image = participant["cl_client_image"] - if cl_image == "": - default_image = DEFAULT_CL_IMAGES.get(cl_client_type, "") - if default_image == "": - fail("{0} received an empty image name and we don't have a default for it".format(cl_client_type)) - participant["cl_client_image"] = default_image - - beacon_extra_params = participant.get("beacon_extra_params", []) - participant["beacon_extra_params"] = beacon_extra_params - - validator_extra_params = participant.get("validator_extra_params", []) - participant["validator_extra_params"] = validator_extra_params - - if result["network_params"]["network_id"].strip() == "": - fail("network_id is empty or spaces it needs to be of non zero length") - - if result["network_params"]["deposit_contract_address"].strip() == "": - fail("deposit_contract_address is empty or spaces it needs to be of non zero 
length") - - if result["network_params"]["preregistered_validator_keys_mnemonic"].strip() == "": - fail("preregistered_validator_keys_mnemonic is empty or spaces it needs to be of non zero length") - - if result["network_params"]["slots_per_epoch"] == 0: - fail("slots_per_epoch is 0 needs to be > 0 ") - - if result["network_params"]["seconds_per_slot"] == 0: - fail("seconds_per_slot is 0 needs to be > 0 ") - - if result["network_params"]["genesis_delay"] == 0: - fail("genesis_delay is 0 needs to be > 0 ") - - if result["network_params"]["capella_fork_epoch"] == 0: - fail("capella_fork_epoch is 0 needs to be > 0 ") - - if result["network_params"]["deneb_fork_epoch"] == 0: - fail("deneb_fork_epoch is 0 needs to be > 0 ") - - required_num_validtors = 2 * result["network_params"]["slots_per_epoch"] - actual_num_validators = len(result["participants"]) * result["network_params"]["num_validator_keys_per_node"] - if required_num_validtors > actual_num_validators: - fail("required_num_validtors - {0} is greater than actual_num_validators - {1}".format(required_num_validtors, actual_num_validators)) - - # Remove if nethermind doesn't break as second node we already test above if its the first node - if len(result["participants"]) >= 2 and result["participants"][1]["el_client_type"] == NETHERMIND_NODE_NAME: - fail("nethermind can't be the first or second node") - - if result.get("mev_type") in ("mock", "full"): - result = enrich_mev_extra_params(result, MEV_BOOST_SERVICE_NAME_PREFIX, FLASHBOTS_MEV_BOOST_PORT) - - return struct( - participants=[struct( - el_client_type=participant["el_client_type"], - el_client_image=participant["el_client_image"], - el_client_log_level=participant["el_client_log_level"], - cl_client_type=participant["cl_client_type"], - cl_client_image=participant["cl_client_image"], - cl_client_log_level=participant["cl_client_log_level"], - beacon_extra_params=participant["beacon_extra_params"], - el_extra_params=participant["el_extra_params"], - validator_extra_params=participant["validator_extra_params"], - builder_network_params=participant["builder_network_params"] - ) for participant in result["participants"]], - network_params=struct( - preregistered_validator_keys_mnemonic=result["network_params"]["preregistered_validator_keys_mnemonic"], - num_validator_keys_per_node=result["network_params"]["num_validator_keys_per_node"], - network_id=result["network_params"]["network_id"], - deposit_contract_address=result["network_params"]["deposit_contract_address"], - seconds_per_slot=result["network_params"]["seconds_per_slot"], - slots_per_epoch=result["network_params"]["slots_per_epoch"], - capella_fork_epoch=result["network_params"]["capella_fork_epoch"], - deneb_fork_epoch=result["network_params"]["deneb_fork_epoch"], - genesis_delay=result["network_params"]["genesis_delay"] - ), - launch_additional_services=result["launch_additional_services"], - wait_for_finalization=result["wait_for_finalization"], - wait_for_verifications=result["wait_for_verifications"], - verifications_epoch_limit=result["verifications_epoch_limit"], - global_client_log_level=result["global_client_log_level"], - mev_type=result["mev_type"], - ), result - -def get_client_log_level_or_default(participant_log_level, global_log_level, client_log_levels): - log_level = participant_log_level - if log_level == "": - log_level = client_log_levels.get(global_log_level, "") - if log_level == "": - fail("No participant log level defined, and the client log level has no mapping for global log level 
'{0}'".format(global_log_level)) - return log_level + result = default_input_args() + for attr in input_args: + value = input_args[attr] + # if its insterted we use the value inserted + if attr not in ATTR_TO_BE_SKIPPED_AT_ROOT and attr in input_args: + result[attr] = value + elif attr == "network_params": + for sub_attr in input_args["network_params"]: + sub_value = input_args["network_params"][sub_attr] + result["network_params"][sub_attr] = sub_value + elif attr == "participants": + participants = [] + for participant in input_args["participants"]: + new_participant = default_participant() + for sub_attr, sub_value in participant.items(): + # if the value is set in input we set it in participant + new_participant[sub_attr] = sub_value + participants.append(new_participant) + result["participants"] = participants + + # validation of the above defaults + for index, participant in enumerate(result["participants"]): + el_client_type = participant["el_client_type"] + cl_client_type = participant["cl_client_type"] + + if index == 0 and el_client_type in (BESU_NODE_NAME, NETHERMIND_NODE_NAME): + fail("besu/nethermind cant be the first participant") + + if cl_client_type in (NIMBUS_NODE_NAME) and ( + result["network_params"]["seconds_per_slot"] < 12 + ): + fail("nimbus can't be run with slot times below 12 seconds") + + el_image = participant["el_client_image"] + if el_image == "": + default_image = DEFAULT_EL_IMAGES.get(el_client_type, "") + if default_image == "": + fail( + "{0} received an empty image name and we don't have a default for it".format( + el_client_type + ) + ) + participant["el_client_image"] = default_image + + cl_image = participant["cl_client_image"] + if cl_image == "": + default_image = DEFAULT_CL_IMAGES.get(cl_client_type, "") + if default_image == "": + fail( + "{0} received an empty image name and we don't have a default for it".format( + cl_client_type + ) + ) + participant["cl_client_image"] = default_image + + beacon_extra_params = participant.get("beacon_extra_params", []) + participant["beacon_extra_params"] = beacon_extra_params + + validator_extra_params = participant.get("validator_extra_params", []) + participant["validator_extra_params"] = validator_extra_params + + if result["network_params"]["network_id"].strip() == "": + fail("network_id is empty or spaces it needs to be of non zero length") + + if result["network_params"]["deposit_contract_address"].strip() == "": + fail( + "deposit_contract_address is empty or spaces it needs to be of non zero length" + ) + + if result["network_params"]["preregistered_validator_keys_mnemonic"].strip() == "": + fail( + "preregistered_validator_keys_mnemonic is empty or spaces it needs to be of non zero length" + ) + + if result["network_params"]["slots_per_epoch"] == 0: + fail("slots_per_epoch is 0 needs to be > 0 ") + + if result["network_params"]["seconds_per_slot"] == 0: + fail("seconds_per_slot is 0 needs to be > 0 ") + + if result["network_params"]["genesis_delay"] == 0: + fail("genesis_delay is 0 needs to be > 0 ") + + if result["network_params"]["capella_fork_epoch"] == 0: + fail("capella_fork_epoch is 0 needs to be > 0 ") + + if result["network_params"]["deneb_fork_epoch"] == 0: + fail("deneb_fork_epoch is 0 needs to be > 0 ") + + required_num_validtors = 2 * result["network_params"]["slots_per_epoch"] + actual_num_validators = ( + len(result["participants"]) + * result["network_params"]["num_validator_keys_per_node"] + ) + if required_num_validtors > actual_num_validators: + fail( + "required_num_validtors - {0} 
is greater than actual_num_validators - {1}".format( + required_num_validtors, actual_num_validators + ) + ) + + # Remove if nethermind doesn't break as second node we already test above if its the first node + if ( + len(result["participants"]) >= 2 + and result["participants"][1]["el_client_type"] == NETHERMIND_NODE_NAME + ): + fail("nethermind can't be the first or second node") + + if result.get("mev_type") in ("mock", "full"): + result = enrich_mev_extra_params( + result, MEV_BOOST_SERVICE_NAME_PREFIX, FLASHBOTS_MEV_BOOST_PORT + ) + + return ( + struct( + participants=[ + struct( + el_client_type=participant["el_client_type"], + el_client_image=participant["el_client_image"], + el_client_log_level=participant["el_client_log_level"], + cl_client_type=participant["cl_client_type"], + cl_client_image=participant["cl_client_image"], + cl_client_log_level=participant["cl_client_log_level"], + beacon_extra_params=participant["beacon_extra_params"], + el_extra_params=participant["el_extra_params"], + validator_extra_params=participant["validator_extra_params"], + builder_network_params=participant["builder_network_params"], + ) + for participant in result["participants"] + ], + network_params=struct( + preregistered_validator_keys_mnemonic=result["network_params"][ + "preregistered_validator_keys_mnemonic" + ], + num_validator_keys_per_node=result["network_params"][ + "num_validator_keys_per_node" + ], + network_id=result["network_params"]["network_id"], + deposit_contract_address=result["network_params"][ + "deposit_contract_address" + ], + seconds_per_slot=result["network_params"]["seconds_per_slot"], + slots_per_epoch=result["network_params"]["slots_per_epoch"], + capella_fork_epoch=result["network_params"]["capella_fork_epoch"], + deneb_fork_epoch=result["network_params"]["deneb_fork_epoch"], + genesis_delay=result["network_params"]["genesis_delay"], + ), + launch_additional_services=result["launch_additional_services"], + wait_for_finalization=result["wait_for_finalization"], + wait_for_verifications=result["wait_for_verifications"], + verifications_epoch_limit=result["verifications_epoch_limit"], + global_client_log_level=result["global_client_log_level"], + mev_type=result["mev_type"], + ), + result, + ) + + +def get_client_log_level_or_default( + participant_log_level, global_log_level, client_log_levels +): + log_level = participant_log_level + if log_level == "": + log_level = client_log_levels.get(global_log_level, "") + if log_level == "": + fail( + "No participant log level defined, and the client log level has no mapping for global log level '{0}'".format( + global_log_level + ) + ) + return log_level + def default_input_args(): - network_params = default_network_params() - participants = [default_participant()] - return { - "mev_type": None, - "participants": participants, - "network_params": network_params, - "launch_additional_services": True, - "wait_for_finalization": False, - "wait_for_verifications": False, - "verifications_epoch_limit": 5, - "global_client_log_level": "info" - } + network_params = default_network_params() + participants = [default_participant()] + return { + "mev_type": None, + "participants": participants, + "network_params": network_params, + "launch_additional_services": True, + "wait_for_finalization": False, + "wait_for_verifications": False, + "verifications_epoch_limit": 5, + "global_client_log_level": "info", + } + def default_network_params(): - # this is temporary till we get params working - return { - "preregistered_validator_keys_mnemonic": 
"giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete", - "num_validator_keys_per_node": 64, - "network_id": "3151908", - "deposit_contract_address": "0x4242424242424242424242424242424242424242", - "seconds_per_slot": 12, - "slots_per_epoch": 32, - "genesis_delay": 120, - "capella_fork_epoch": 1, - # arbitrarily large while we sort out https://github.com/kurtosis-tech/eth-network-package/issues/42 - # this will take 53~ hoours for now - "deneb_fork_epoch": 500, - } + # this is temporary till we get params working + return { + "preregistered_validator_keys_mnemonic": "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete", + "num_validator_keys_per_node": 64, + "network_id": "3151908", + "deposit_contract_address": "0x4242424242424242424242424242424242424242", + "seconds_per_slot": 12, + "slots_per_epoch": 32, + "genesis_delay": 120, + "capella_fork_epoch": 1, + # arbitrarily large while we sort out https://github.com/kurtosis-tech/eth-network-package/issues/42 + # this will take 53~ hoours for now + "deneb_fork_epoch": 500, + } + def default_participant(): - return { - "el_client_type": "geth", - "el_client_image": "", - "el_client_log_level": "", - "cl_client_type": "lighthouse", - "cl_client_image": "", - "cl_client_log_level": "", - "beacon_extra_params": [], - "el_extra_params": [], - "validator_extra_params": [], - "builder_network_params": None, - "count": 1 - } + return { + "el_client_type": "geth", + "el_client_image": "", + "el_client_log_level": "", + "cl_client_type": "lighthouse", + "cl_client_image": "", + "cl_client_log_level": "", + "beacon_extra_params": [], + "el_extra_params": [], + "validator_extra_params": [], + "builder_network_params": None, + "count": 1, + } # TODO perhaps clean this up into a map def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port): - for index, participant in enumerate(parsed_arguments_dict["participants"]): - mev_url = "http://{0}{1}:{2}".format(mev_prefix, index, mev_port) - if participant["cl_client_type"] == "lighthouse": - participant["validator_extra_params"].append("--builder-proposals") - participant["beacon_extra_params"].append("--builder={0}".format(mev_url)) - if participant["cl_client_type"] == "lodestar": - participant["validator_extra_params"].append("--builder") - participant["beacon_extra_params"].append("--builder", "--builder.urls={0}".format(mev_url)) - if participant["cl_client_type"] == "nimbus": - participant["validator_extra_params"].append("--payload-builder=true") - participant["beacon_extra_params"].append("--payload-builder=true", "--payload-builder-urs={0}".format(mev_url)) - if participant["cl_client_type"] == "teku": - participant["beacon_extra_params"].append("--validators-builder-registration-default-enabled=true", "--builder-endpoint=".format(mev_url)) - if participant["cl_client_type"] == "prysm": - participant["validator_extra_params"].append("--enable-builder") - participant["beacon_extra_params"].append("--http-mev-relay={0}".format(mev_url)) - return parsed_arguments_dict + for index, participant in enumerate(parsed_arguments_dict["participants"]): + mev_url = "http://{0}{1}:{2}".format(mev_prefix, index, mev_port) + if participant["cl_client_type"] == "lighthouse": + participant["validator_extra_params"].append("--builder-proposals") + 
participant["beacon_extra_params"].append("--builder={0}".format(mev_url)) + if participant["cl_client_type"] == "lodestar": + participant["validator_extra_params"].append("--builder") + participant["beacon_extra_params"].append( + "--builder", "--builder.urls={0}".format(mev_url) + ) + if participant["cl_client_type"] == "nimbus": + participant["validator_extra_params"].append("--payload-builder=true") + participant["beacon_extra_params"].append( + "--payload-builder=true", "--payload-builder-urs={0}".format(mev_url) + ) + if participant["cl_client_type"] == "teku": + participant["beacon_extra_params"].append( + "--validators-builder-registration-default-enabled=true", + "--builder-endpoint=".format(mev_url), + ) + if participant["cl_client_type"] == "prysm": + participant["validator_extra_params"].append("--enable-builder") + participant["beacon_extra_params"].append( + "--http-mev-relay={0}".format(mev_url) + ) + return parsed_arguments_dict diff --git a/src/prometheus/prometheus_launcher.star b/src/prometheus/prometheus_launcher.star index 201c963e2..11d30eb4d 100644 --- a/src/prometheus/prometheus_launcher.star +++ b/src/prometheus/prometheus_launcher.star @@ -1,4 +1,6 @@ -shared_utils = import_module("github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star") +shared_utils = import_module( + "github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star" +) SERVICE_NAME = "prometheus" @@ -12,54 +14,61 @@ CONFIG_FILENAME = "prometheus-config.yml" CONFIG_DIR_MOUNTPOINT_ON_PROMETHEUS = "/config" USED_PORTS = { - HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUMBER, shared_utils.TCP_PROTOCOL, shared_utils.HTTP_APPLICATION_PROTOCOL) + HTTP_PORT_ID: shared_utils.new_port_spec( + HTTP_PORT_NUMBER, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) } + def launch_prometheus(plan, config_template, cl_client_contexts): - all_cl_nodes_metrics_info = [] - for client in cl_client_contexts: - all_cl_nodes_metrics_info.extend(client.cl_nodes_metrics_info) + all_cl_nodes_metrics_info = [] + for client in cl_client_contexts: + all_cl_nodes_metrics_info.extend(client.cl_nodes_metrics_info) - template_data = new_config_template_data(all_cl_nodes_metrics_info) - template_and_data = shared_utils.new_template_and_data(config_template, template_data) - template_and_data_by_rel_dest_filepath = {} - template_and_data_by_rel_dest_filepath[CONFIG_FILENAME] = template_and_data + template_data = new_config_template_data(all_cl_nodes_metrics_info) + template_and_data = shared_utils.new_template_and_data( + config_template, template_data + ) + template_and_data_by_rel_dest_filepath = {} + template_and_data_by_rel_dest_filepath[CONFIG_FILENAME] = template_and_data - config_files_artifact_name = plan.render_templates(template_and_data_by_rel_dest_filepath, "prometheus-config") + config_files_artifact_name = plan.render_templates( + template_and_data_by_rel_dest_filepath, "prometheus-config" + ) - config = get_config(config_files_artifact_name) - prometheus_service = plan.add_service(SERVICE_NAME, config) + config = get_config(config_files_artifact_name) + prometheus_service = plan.add_service(SERVICE_NAME, config) - private_ip_address = prometheus_service.ip_address - prometheus_service_http_port = prometheus_service.ports[HTTP_PORT_ID].number + private_ip_address = prometheus_service.ip_address + prometheus_service_http_port = prometheus_service.ports[HTTP_PORT_ID].number - return "http://{0}:{1}".format(private_ip_address, prometheus_service_http_port) + 
return "http://{0}:{1}".format(private_ip_address, prometheus_service_http_port) def get_config(config_files_artifact_name): - config_file_path = shared_utils.path_join(CONFIG_DIR_MOUNTPOINT_ON_PROMETHEUS, shared_utils.path_base(CONFIG_FILENAME)) - return ServiceConfig( - image = IMAGE_NAME, - ports = USED_PORTS, - files = { - CONFIG_DIR_MOUNTPOINT_ON_PROMETHEUS: config_files_artifact_name - }, - cmd = [ - # You can check all the cli flags starting the container and going to the flags section - # in Prometheus admin page "{{prometheusPublicURL}}/flags" section - "--config.file=" + config_file_path, - "--storage.tsdb.path=/prometheus", - "--storage.tsdb.retention.time=1d", - "--storage.tsdb.retention.size=512MB", - "--storage.tsdb.wal-compression", - "--web.console.libraries=/etc/prometheus/console_libraries", - "--web.console.templates=/etc/prometheus/consoles", - "--web.enable-lifecycle", - ] - ) + config_file_path = shared_utils.path_join( + CONFIG_DIR_MOUNTPOINT_ON_PROMETHEUS, shared_utils.path_base(CONFIG_FILENAME) + ) + return ServiceConfig( + image=IMAGE_NAME, + ports=USED_PORTS, + files={CONFIG_DIR_MOUNTPOINT_ON_PROMETHEUS: config_files_artifact_name}, + cmd=[ + # You can check all the cli flags starting the container and going to the flags section + # in Prometheus admin page "{{prometheusPublicURL}}/flags" section + "--config.file=" + config_file_path, + "--storage.tsdb.path=/prometheus", + "--storage.tsdb.retention.time=1d", + "--storage.tsdb.retention.size=512MB", + "--storage.tsdb.wal-compression", + "--web.console.libraries=/etc/prometheus/console_libraries", + "--web.console.templates=/etc/prometheus/consoles", + "--web.enable-lifecycle", + ], + ) def new_config_template_data(cl_nodes_metrics_info): - return { - "CLNodesMetricsInfo": cl_nodes_metrics_info - } + return {"CLNodesMetricsInfo": cl_nodes_metrics_info} diff --git a/src/shared_utils/shared_utils.star b/src/shared_utils/shared_utils.star index 02b78a377..84190647e 100644 --- a/src/shared_utils/shared_utils.star +++ b/src/shared_utils/shared_utils.star @@ -3,30 +3,46 @@ UDP_PROTOCOL = "UDP" HTTP_APPLICATION_PROTOCOL = "http" NOT_PROVIDED_APPLICATION_PROTOCOL = "" NOT_PROVIDED_WAIT = "not-provided-wait" + + def new_template_and_data(template, template_data_json): - return struct(template = template, data = template_data_json) + return struct(template=template, data=template_data_json) def path_join(*args): - joined_path = "/".join(args) - return joined_path.replace("//", "/") + joined_path = "/".join(args) + return joined_path.replace("//", "/") def path_base(path): - split_path = path.split("/") - return split_path[-1] + split_path = path.split("/") + return split_path[-1] def path_dir(path): - split_path = path.split("/") - if len(split_path) <= 1: - return "." - split_path = split_path[:-1] - return "/".join(split_path) or "/" - - -def new_port_spec(number, transport_protocol, application_protocol = NOT_PROVIDED_APPLICATION_PROTOCOL, wait = NOT_PROVIDED_WAIT): - if (wait == NOT_PROVIDED_WAIT): - return PortSpec(number = number, transport_protocol = transport_protocol, application_protocol = application_protocol) - - return PortSpec(number = number, transport_protocol = transport_protocol, application_protocol = application_protocol, wait = wait) + split_path = path.split("/") + if len(split_path) <= 1: + return "." 
+ split_path = split_path[:-1] + return "/".join(split_path) or "/" + + +def new_port_spec( + number, + transport_protocol, + application_protocol=NOT_PROVIDED_APPLICATION_PROTOCOL, + wait=NOT_PROVIDED_WAIT, +): + if wait == NOT_PROVIDED_WAIT: + return PortSpec( + number=number, + transport_protocol=transport_protocol, + application_protocol=application_protocol, + ) + + return PortSpec( + number=number, + transport_protocol=transport_protocol, + application_protocol=application_protocol, + wait=wait, + ) diff --git a/src/static_files/static_files.star b/src/static_files/static_files.star index e9acadc45..cbe57f210 100644 --- a/src/static_files/static_files.star +++ b/src/static_files/static_files.star @@ -2,22 +2,30 @@ STATIC_FILES_DIRPATH = "github.com/kurtosis-tech/eth2-package/static_files" # CL Forkmon config -CL_FORKMON_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \ - "/cl-forkmon-config/config.toml.tmpl" +CL_FORKMON_CONFIG_TEMPLATE_FILEPATH = ( + STATIC_FILES_DIRPATH + "/cl-forkmon-config/config.toml.tmpl" +) # EL Forkmon config -EL_FORKMON_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \ - "/el-forkmon-config/config.toml.tmpl" +EL_FORKMON_CONFIG_TEMPLATE_FILEPATH = ( + STATIC_FILES_DIRPATH + "/el-forkmon-config/config.toml.tmpl" +) # Prometheus config -PROMETHEUS_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \ - "/prometheus-config/prometheus.yml.tmpl" +PROMETHEUS_CONFIG_TEMPLATE_FILEPATH = ( + STATIC_FILES_DIRPATH + "/prometheus-config/prometheus.yml.tmpl" +) # Grafana config GRAFANA_CONFIG_DIRPATH = "/grafana-config" -GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \ - GRAFANA_CONFIG_DIRPATH + "/templates/datasource.yml.tmpl" -GRAFANA_DASHBOARD_PROVIDERS_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \ - GRAFANA_CONFIG_DIRPATH + "/templates/dashboard-providers.yml.tmpl" -GRAFANA_DASHBOARDS_CONFIG_DIRPATH = STATIC_FILES_DIRPATH + \ - GRAFANA_CONFIG_DIRPATH + "/dashboards/dashboard.json" +GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH = ( + STATIC_FILES_DIRPATH + GRAFANA_CONFIG_DIRPATH + "/templates/datasource.yml.tmpl" +) +GRAFANA_DASHBOARD_PROVIDERS_CONFIG_TEMPLATE_FILEPATH = ( + STATIC_FILES_DIRPATH + + GRAFANA_CONFIG_DIRPATH + + "/templates/dashboard-providers.yml.tmpl" +) +GRAFANA_DASHBOARDS_CONFIG_DIRPATH = ( + STATIC_FILES_DIRPATH + GRAFANA_CONFIG_DIRPATH + "/dashboards/dashboard.json" +) diff --git a/src/testnet_verifier/testnet_verifier.star b/src/testnet_verifier/testnet_verifier.star index 1baf0e1bb..3b8146163 100644 --- a/src/testnet_verifier/testnet_verifier.star +++ b/src/testnet_verifier/testnet_verifier.star @@ -3,63 +3,82 @@ SERVICE_NAME = "testnet-verifier" # We use Docker exec commands to run the commands we need, so we override the default SYNCHRONOUS_ENTRYPOINT_ARGS = [ - "sleep", - "999999", + "sleep", + "999999", ] # this is broken check - https://github.com/ethereum/merge-testnet-verifier/issues/4 def launch_testnet_verifier(plan, params, el_client_contexts, cl_client_contexts): - config = get_asynchronous_verification_config(params, el_client_contexts, cl_client_contexts) - plan.add_service(SERVICE_NAME, config) + config = get_asynchronous_verification_config( + params, el_client_contexts, cl_client_contexts + ) + plan.add_service(SERVICE_NAME, config) -def run_synchronous_testnet_verification(plan, params, el_client_contexts, cl_client_contexts): - config = get_synchronous_verification_config() - plan.add_service(SERVICE_NAME, config) +def run_synchronous_testnet_verification( + plan, params, el_client_contexts, 
cl_client_contexts
+):
+    config = get_synchronous_verification_config()
+    plan.add_service(SERVICE_NAME, config)
 
-    command = get_cmd(params, el_client_contexts, cl_client_contexts, True)
-    exec_result = plan.exec(recipe=ExecRecipe(command=command), service_name=SERVICE_NAME)
-    plan.assert(exec_result["code"], "==", 0)
+    command = get_cmd(params, el_client_contexts, cl_client_contexts, True)
+    exec_result = plan.exec(
+        recipe=ExecRecipe(command=command), service_name=SERVICE_NAME
+    )
+    if exec_result["code"] != 0:
+        fail('Expected exec_result["code"] to be equal to 0')
 
 
 def get_cmd(params, el_client_contexts, cl_client_contexts, add_binary_name):
-    command = []
-
-    if add_binary_name:
-        command.append("./merge_testnet_verifier")
-
-    command.append("--ttd")
-    command.append("0")
-
-    for el_client_context in el_client_contexts:
-        command.append("--client")
-        command.append("{0},http://{1}:{2}".format(el_client_context.client_name, el_client_context.ip_addr, el_client_context.rpc_port_num))
-
-    for cl_client_context in cl_client_contexts:
-        command.append("--client")
-        command.append("{0},http://{1}:{2}".format(cl_client_context.client_name, cl_client_context.ip_addr, cl_client_context.http_port_num))
-
-    command.append("--ttd-epoch-limit")
-    command.append("0")
-    command.append("--verif-epoch-limit")
-    command.append("{0}".format(params.verifications_epoch_limit))
-
-    return command
-
-
-
-
-def get_asynchronous_verification_config(params, el_client_contexts, cl_client_contexts):
-    commands = get_cmd(params, el_client_contexts, cl_client_contexts, False)
-    return ServiceConfig(
-        image = IMAGE_NAME,
-        cmd = commands,
-    )
+    command = []
+
+    if add_binary_name:
+        command.append("./merge_testnet_verifier")
+
+    command.append("--ttd")
+    command.append("0")
+
+    for el_client_context in el_client_contexts:
+        command.append("--client")
+        command.append(
+            "{0},http://{1}:{2}".format(
+                el_client_context.client_name,
+                el_client_context.ip_addr,
+                el_client_context.rpc_port_num,
+            )
+        )
+
+    for cl_client_context in cl_client_contexts:
+        command.append("--client")
+        command.append(
+            "{0},http://{1}:{2}".format(
+                cl_client_context.client_name,
+                cl_client_context.ip_addr,
+                cl_client_context.http_port_num,
+            )
+        )
+
+    command.append("--ttd-epoch-limit")
+    command.append("0")
+    command.append("--verif-epoch-limit")
+    command.append("{0}".format(params.verifications_epoch_limit))
+
+    return command
+
+
+def get_asynchronous_verification_config(
+    params, el_client_contexts, cl_client_contexts
+):
+    commands = get_cmd(params, el_client_contexts, cl_client_contexts, False)
+    return ServiceConfig(
+        image=IMAGE_NAME,
+        cmd=commands,
+    )
 
 
 def get_synchronous_verification_config():
-    return ServiceConfig(
-        image = IMAGE_NAME,
-        entrypoint = SYNCHRONOUS_ENTRYPOINT_ARGS,
-    )
+    return ServiceConfig(
+        image=IMAGE_NAME,
+        entrypoint=SYNCHRONOUS_ENTRYPOINT_ARGS,
+    )
diff --git a/src/transaction_spammer/transaction_spammer.star b/src/transaction_spammer/transaction_spammer.star
index 14572664a..df1332aa6 100644
--- a/src/transaction_spammer/transaction_spammer.star
+++ b/src/transaction_spammer/transaction_spammer.star
@@ -1,28 +1,30 @@
 IMAGE_NAME = "kurtosistech/tx-fuzz:0.2.0"
 SERVICE_NAME = "transaction-spammer"
 
+
 def launch_transaction_spammer(plan, prefunded_addresses, el_client_context):
-    config = get_config(prefunded_addresses, el_client_context)
-    plan.add_service(SERVICE_NAME, config)
+    config = get_config(prefunded_addresses, el_client_context)
+    plan.add_service(SERVICE_NAME, config)
 
 
 def get_config(prefunded_addresses, el_client_context):
-    private_keys_strs = []
-    address_strs = []
-
-    for prefunded_address in prefunded_addresses:
-        private_keys_strs.append(prefunded_address.private_key)
-        address_strs.append(prefunded_address.address)
+    private_keys_strs = []
+    address_strs = []
 
-    comma_separated_private_keys = ",".join(private_keys_strs)
-    comma_separated_addresses = ",".join(address_strs)
-    return ServiceConfig(
-        image = IMAGE_NAME,
-        cmd = [
-            "http://{0}:{1}".format(el_client_context.ip_addr, el_client_context.rpc_port_num),
-            "spam",
-            comma_separated_private_keys,
-            comma_separated_addresses
-        ]
-    )
+    for prefunded_address in prefunded_addresses:
+        private_keys_strs.append(prefunded_address.private_key)
+        address_strs.append(prefunded_address.address)
+
+    comma_separated_private_keys = ",".join(private_keys_strs)
+    comma_separated_addresses = ",".join(address_strs)
+    return ServiceConfig(
+        image=IMAGE_NAME,
+        cmd=[
+            "http://{0}:{1}".format(
+                el_client_context.ip_addr, el_client_context.rpc_port_num
+            ),
+            "spam",
+            comma_separated_private_keys,
+            comma_separated_addresses,
+        ],
+    )
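
For orientation, here is a minimal caller sketch showing how the reformatted transaction-spammer module above might be wired up from another Starlark file. The wrapper function name and the choice of spamming only the first EL client are illustrative assumptions, not part of this patch:

    # Illustrative sketch only -- the wrapper name and "first EL client" choice are assumptions.
    transaction_spammer = import_module(
        "github.com/kurtosis-tech/eth2-package/src/transaction_spammer/transaction_spammer.star"
    )


    def spam_first_el_node(plan, prefunded_addresses, el_client_contexts):
        # Each prefunded address exposes .private_key and .address; get_config()
        # joins them into the comma-separated arguments passed to tx-fuzz.
        transaction_spammer.launch_transaction_spammer(
            plan, prefunded_addresses, el_client_contexts[0]
        )

The wrapper simply delegates to launch_transaction_spammer(), which builds the ServiceConfig shown in the hunk above and registers the "transaction-spammer" service against the chosen EL client's RPC endpoint.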