diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ce3f1bb50..eeea4b899 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -51,9 +51,25 @@ jobs: if: steps.output-cache.outputs.cache-hit != 'true' run: make compile-port compile-native + download-beacon-node-oapi: + name: Download Beacon Node OAPI + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - name: Cache Beacon Node OAPI + id: output-cache + uses: actions/cache@v3 + with: + path: ./beacon-node-oapi.json + key: ${{ runner.os }}-beacon-node-oapi-${{ hashFiles('.oapi_version') }} + lookup-only: true + - name: Download Beacon Node OAPI + if: steps.output-cache.outputs.cache-hit != 'true' + run: make download-beacon-node-oapi + build: name: Build project - needs: compile-native + needs: [compile-native, download-beacon-node-oapi] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 @@ -75,6 +91,12 @@ jobs: path: deps key: ${{ runner.os }}-mix-${{ hashFiles('**/mix.lock') }} restore-keys: ${{ runner.os }}-mix- + - name: Fetch beacon node oapi file + uses: actions/cache/restore@v3 + with: + path: ./beacon-node-oapi.json + key: ${{ runner.os }}-beacon-node-oapi-${{ hashFiles('.oapi_version') }} + fail-on-cache-miss: true - name: Install dependencies run: | sudo apt-get install -y protobuf-compiler @@ -104,7 +126,7 @@ jobs: smoke: name: Start and stop the node - needs: compile-native + needs: [compile-native, download-beacon-node-oapi] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 @@ -126,6 +148,12 @@ jobs: path: deps key: ${{ runner.os }}-mix-${{ hashFiles('**/mix.lock') }} restore-keys: ${{ runner.os }}-mix- + - name: Fetch beacon node oapi file + uses: actions/cache/restore@v3 + with: + path: ./beacon-node-oapi.json + key: ${{ runner.os }}-beacon-node-oapi-${{ hashFiles('.oapi_version') }} + fail-on-cache-miss: true - name: Install dependencies run: | sudo apt-get install -y protobuf-compiler @@ -145,7 +173,7 @@ jobs: test: name: Test - needs: compile-native + needs: [compile-native, download-beacon-node-oapi] runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 @@ -167,6 +195,12 @@ jobs: path: deps key: ${{ runner.os }}-mix-${{ hashFiles('**/mix.lock') }} restore-keys: ${{ runner.os }}-mix- + - name: Fetch beacon node oapi file + uses: actions/cache/restore@v3 + with: + path: ./beacon-node-oapi.json + key: ${{ runner.os }}-beacon-node-oapi-${{ hashFiles('.oapi_version') }} + fail-on-cache-miss: true - name: Set up cargo cache uses: Swatinem/rust-cache@v2 with: @@ -223,7 +257,7 @@ jobs: spectests: name: Run spec-tests - needs: [compile-native, download-spectests] + needs: [compile-native, download-spectests, download-beacon-node-oapi] strategy: matrix: config: ["minimal", "general", "mainnet"] @@ -253,6 +287,12 @@ jobs: path: deps key: ${{ runner.os }}-mix-${{ hashFiles('**/mix.lock') }} restore-keys: ${{ runner.os }}-mix- + - name: Fetch beacon node oapi file + uses: actions/cache/restore@v3 + with: + path: ./beacon-node-oapi.json + key: ${{ runner.os }}-beacon-node-oapi-${{ hashFiles('.oapi_version') }} + fail-on-cache-miss: true - name: Set up cargo cache uses: Swatinem/rust-cache@v2 with: diff --git a/.gitignore b/.gitignore index 7e82c30ad..f417f1396 100644 --- a/.gitignore +++ b/.gitignore @@ -44,20 +44,24 @@ priv .vscode/ # spec-test vectors -test/spec/vectors +/test/spec/vectors -native/libp2p_port/libp2p_port +/native/libp2p_port/libp2p_port # Proto generated code. *.pb.ex *.pb.go # local db. 
-level_db +/level_db +/logs # Generated tests -test/generated +/test/generated # profiling artifacts callgrind.out.* *-eflambe-output.bggg + +# beacon node oapi json file +beacon-node-oapi.json diff --git a/.oapi_version b/.oapi_version new file mode 100644 index 000000000..3dfbe3369 --- /dev/null +++ b/.oapi_version @@ -0,0 +1 @@ +v2.4.2 diff --git a/Makefile b/Makefile index 144011e8b..0c822c710 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ clean-vectors download-vectors uncompress-vectors proto \ spec-test-% spec-test spec-test-config-% spec-test-runner-% \ spec-test-mainnet-% spec-test-minimal-% spec-test-general-% \ - clean-tests gen-spec compile-all + clean-tests gen-spec compile-all download-beacon-node-oapi # Delete current file when command fails .DELETE_ON_ERROR: @@ -86,7 +86,7 @@ proto: $(PROTOBUF_EX_FILES) $(PROTOBUF_GO_FILES) compile-native: $(OUTPUT_DIR)/libp2p_nif.so $(OUTPUT_DIR)/libp2p_port #🔨 compile-all: @ Compile the elixir project and its dependencies. -compile-all: compile-native $(PROTOBUF_EX_FILES) +compile-all: compile-native $(PROTOBUF_EX_FILES) download-beacon-node-oapi mix compile #🗑️ clean: @ Remove the build files. @@ -126,6 +126,17 @@ sepolia: compile-all test: compile-all mix test --no-start --exclude spectest +#### BEACON NODE OAPI #### +OAPI_NAME = beacon-node-oapi +OAPI_VERSION := $(shell cat .oapi_version) +$(OAPI_NAME).json: .oapi_version + curl -L -o "$@" \ + "https://ethereum.github.io/beacon-APIs/releases/${OAPI_VERSION}/beacon-node-oapi.json" + +OPENAPI_JSON := $(OAPI_NAME).json + +download-beacon-node-oapi: ${OPENAPI_JSON} + ##### SPEC TEST VECTORS ##### SPECTEST_VERSION := $(shell cat .spectest_version) @@ -206,3 +217,7 @@ fmt: cd native/snappy_nif; cargo fmt cd native/ssz_nif; cargo fmt cd native/bls_nif; cargo fmt + +#✅ dialyzer: @ Run dialyzer (static analysis tool). +dialyzer: + mix dialyzer diff --git a/README.md b/README.md index fdea9d514..c34c706d0 100644 --- a/README.md +++ b/README.md @@ -96,7 +96,7 @@ To run these checks locally: make test # Runs tests make spec-test # Runs all spec-tests make lint # Runs linter and format-checker -mix dialyzer # Runs type-checker +make dialyzer # Runs type-checker ``` Source code can be formatted using `make fmt`. 
diff --git a/bench/block_processing.exs b/bench/block_processing.exs index ca07d61c5..75ed57f99 100644 --- a/bench/block_processing.exs +++ b/bench/block_processing.exs @@ -1,6 +1,5 @@ alias LambdaEthereumConsensus.ForkChoice alias LambdaEthereumConsensus.ForkChoice.Handlers -alias LambdaEthereumConsensus.ForkChoice.Helpers alias LambdaEthereumConsensus.StateTransition.Cache alias LambdaEthereumConsensus.Store alias LambdaEthereumConsensus.Store.BlockStore @@ -20,12 +19,15 @@ IO.puts("fetching blocks...") {:ok, %SignedBeaconBlock{} = new_block} = BlockStore.get_block_by_slot(slot + 1) IO.puts("initializing store...") -{:ok, store} = Helpers.get_forkchoice_store(state, block, true) +{:ok, store} = Types.Store.get_forkchoice_store(state, block, true) store = Handlers.on_tick(store, store.time + 30) attestations = new_block.message.body.attestations attester_slashings = new_block.message.body.attester_slashings +{:ok, root} = BlockStore.get_block_root_by_slot(slot) + +IO.puts("about to process block: #{slot + 1}, with root: #{Base.encode16(root)}...") IO.puts("#{length(attestations)} attestations ; #{length(attester_slashings)} attester slashings") IO.puts("") diff --git a/config/config.exs b/config/config.exs index 9a9c9a280..69a2e77f1 100644 --- a/config/config.exs +++ b/config/config.exs @@ -4,6 +4,31 @@ import Config # Configure logging config :logger, level: :info, truncate: :infinity +# # Uncomment to log to a file +# # TODO: we might want to enable this with a CLI flag +# config :logger, :default_handler, +# config: [ +# file: ~c"logs/system.log", +# filesync_repeat_interval: 5000, +# file_check: 5000, +# max_no_bytes: 10_000_000, +# max_no_files: 5, +# compress_on_rotate: true +# ] + +# # NOTE: We want to log UTC timestamps, for convenience +# config :logger, utc_log: true + +# config :logger, :default_formatter, +# format: {LogfmtEx, :format}, +# colors: [enabled: false], +# metadata: [:mfa] + +# config :logfmt_ex, :opts, +# message_key: "msg", +# timestamp_key: "ts", +# timestamp_format: :iso8601 + # Configures the phoenix endpoint config :lambda_ethereum_consensus, BeaconApi.Endpoint, http: [port: 4000], diff --git a/config/runtime.exs b/config/runtime.exs index aea30a440..54cb1bbf7 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -1,4 +1,5 @@ import Config +require Logger {args, _remaining_args, _errors} = OptionParser.parse(System.argv(), @@ -15,7 +16,7 @@ network = Keyword.get(args, :network, "mainnet") checkpoint_sync = Keyword.get(args, :checkpoint_sync) execution_endpoint = Keyword.get(args, :execution_endpoint, "http://localhost:8551") jwt_path = Keyword.get(args, :execution_jwt) -mock_execution = Keyword.get(args, :mock_execution, false) +mock_execution = Keyword.get(args, :mock_execution, config_env() == :test or is_nil(jwt_path)) config :lambda_ethereum_consensus, LambdaEthereumConsensus.ForkChoice, checkpoint_sync: checkpoint_sync @@ -61,3 +62,10 @@ block_time_ms = config :lambda_ethereum_consensus, LambdaEthereumConsensus.Telemetry, block_processing_buckets: [0.5, 1.0, 1.5, 2, 4, 6, 8] |> Enum.map(&(&1 * block_time_ms)) + +if is_nil(jwt_secret) do + Logger.warning( + "[EngineAPI] A JWT secret is needed for communication with the execution engine. " <> + "Please specify the file to load it from with the --execution-jwt flag." 
+  )
+end
diff --git a/lib/beacon_api/api_spec.ex b/lib/beacon_api/api_spec.ex
new file mode 100644
index 000000000..5d0853b40
--- /dev/null
+++ b/lib/beacon_api/api_spec.ex
@@ -0,0 +1,15 @@
+defmodule BeaconApi.ApiSpec do
+  @moduledoc false
+  alias OpenApiSpex.OpenApi
+  @behaviour OpenApi
+
+  file = "beacon-node-oapi.json"
+  @external_resource file
+  @ethspec file
+           |> File.read!()
+           |> Jason.decode!()
+           |> OpenApiSpex.OpenApi.Decode.decode()
+
+  @impl OpenApi
+  def spec, do: @ethspec
+end
diff --git a/lib/beacon_api/controllers/v1/beacon_controller.ex b/lib/beacon_api/controllers/v1/beacon_controller.ex
index d95f2cc70..ff5fb64a1 100644
--- a/lib/beacon_api/controllers/v1/beacon_controller.ex
+++ b/lib/beacon_api/controllers/v1/beacon_controller.ex
@@ -1,11 +1,25 @@
 defmodule BeaconApi.V1.BeaconController do
+  alias BeaconApi.ApiSpec
   alias BeaconApi.ErrorController
+  alias LambdaEthereumConsensus.ForkChoice
   alias LambdaEthereumConsensus.Store.BlockStore
 
   use BeaconApi, :controller
 
+  plug(OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true)
+
+  @doc """
+  action is an atom that corresponds to the controller action's function atoms declared in `BeaconApi.Router`
+  """
+  def open_api_operation(action) when is_atom(action) do
+    apply(__MODULE__, :"#{action}_operation", [])
+  end
+
+  def get_state_root_operation,
+    do: ApiSpec.spec().paths["/eth/v1/beacon/states/{state_id}/root"].get
+
   @spec get_state_root(Plug.Conn.t(), any) :: Plug.Conn.t()
-  def get_state_root(conn, %{"state_id" => state_id}) do
+  def get_state_root(conn, %{state_id: state_id}) do
     with {:ok, {root, execution_optimistic, finalized}} <-
            BeaconApi.Utils.parse_id(state_id) |> ForkChoice.Helpers.root_by_id(),
          {:ok, state_root} <- ForkChoice.Helpers.get_state_root(root) do
@@ -25,28 +39,31 @@ defmodule BeaconApi.V1.BeaconController do
     end
   end
 
+  def get_block_root_operation,
+    do: ApiSpec.spec().paths["/eth/v1/beacon/blocks/{block_id}/root"].get
+
   @spec get_block_root(Plug.Conn.t(), any) :: Plug.Conn.t()
-  def get_block_root(conn, %{"block_id" => "head"}) do
+  def get_block_root(conn, %{block_id: "head"}) do
     # TODO: determine head and return it
     conn |> block_not_found()
   end
 
-  def get_block_root(conn, %{"block_id" => "finalized"}) do
+  def get_block_root(conn, %{block_id: "finalized"}) do
     # TODO
     conn |> block_not_found()
   end
 
-  def get_block_root(conn, %{"block_id" => "justified"}) do
+  def get_block_root(conn, %{block_id: "justified"}) do
     # TODO
     conn |> block_not_found()
   end
 
-  def get_block_root(conn, %{"block_id" => "genesis"}) do
+  def get_block_root(conn, %{block_id: "genesis"}) do
     # TODO
     conn |> block_not_found()
   end
 
-  def get_block_root(conn, %{"block_id" => "0x" <> hex_block_id}) do
+  def get_block_root(conn, %{block_id: "0x" <> hex_block_id}) do
     with {:ok, block_root} <- Base.decode16(hex_block_id, case: :mixed),
          {:ok, _signed_block} <- BlockStore.get_block(block_root) do
       conn |> root_response(block_root, true, false)
@@ -56,7 +73,7 @@ defmodule BeaconApi.V1.BeaconController do
     end
   end
 
-  def get_block_root(conn, %{"block_id" => block_id}) do
+  def get_block_root(conn, %{block_id: block_id}) do
     with {slot, ""} when slot >= 0 <- Integer.parse(block_id),
          {:ok, block_root} <- BlockStore.get_block_root_by_slot(slot) do
       conn |> root_response(block_root, true, false)
diff --git a/lib/beacon_api/controllers/v2/beacon_controller.ex b/lib/beacon_api/controllers/v2/beacon_controller.ex
index 365116da9..0bcf522a5 100644
--- a/lib/beacon_api/controllers/v2/beacon_controller.ex
+++ 
b/lib/beacon_api/controllers/v2/beacon_controller.ex
@@ -1,30 +1,43 @@
 defmodule BeaconApi.V2.BeaconController do
+  alias BeaconApi.ApiSpec
   alias BeaconApi.ErrorController
   alias LambdaEthereumConsensus.Store.BlockStore
   use BeaconApi, :controller
 
+  plug(OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true)
+
+  @doc """
+  action is an atom that corresponds to the controller action's function atoms declared in `BeaconApi.Router`
+  """
+  def open_api_operation(action) when is_atom(action) do
+    apply(__MODULE__, :"#{action}_operation", [])
+  end
+
+  def get_block_operation,
+    do: ApiSpec.spec().paths["/eth/v2/beacon/blocks/{block_id}"].get
+
   @spec get_block(Plug.Conn.t(), any) :: Plug.Conn.t()
-  def get_block(conn, %{"block_id" => "head"}) do
+  def get_block(conn, %{block_id: "head"}) do
     # TODO: determine head and return it
     conn |> block_not_found()
   end
 
-  def get_block(conn, %{"block_id" => "finalized"}) do
+  def get_block(conn, %{block_id: "finalized"}) do
     # TODO
     conn |> block_not_found()
   end
 
-  def get_block(conn, %{"block_id" => "justified"}) do
+  def get_block(conn, %{block_id: "justified"}) do
     # TODO
     conn |> block_not_found()
   end
 
-  def get_block(conn, %{"block_id" => "genesis"}) do
+  def get_block(conn, %{block_id: "genesis"}) do
     # TODO
     conn |> block_not_found()
   end
 
-  def get_block(conn, %{"block_id" => "0x" <> hex_block_id}) do
+  def get_block(conn, %{block_id: "0x" <> hex_block_id}) do
     with {:ok, block_root} <- Base.decode16(hex_block_id, case: :mixed),
          {:ok, block} <- BlockStore.get_block(block_root) do
       conn |> block_response(block)
@@ -34,7 +47,7 @@ defmodule BeaconApi.V2.BeaconController do
     end
   end
 
-  def get_block(conn, %{"block_id" => block_id}) do
+  def get_block(conn, %{block_id: block_id}) do
     with {slot, ""} when slot >= 0 <- Integer.parse(block_id),
          {:ok, block} <- BlockStore.get_block_by_slot(slot) do
       conn |> block_response(block)
diff --git a/lib/beacon_api/router.ex b/lib/beacon_api/router.ex
index 57904ce2c..da06d17ad 100644
--- a/lib/beacon_api/router.ex
+++ b/lib/beacon_api/router.ex
@@ -3,6 +3,7 @@ defmodule BeaconApi.Router do
 
   pipeline :api do
     plug(:accepts, ["json"])
+    plug(OpenApiSpex.Plug.PutApiSpec, module: BeaconApi.ApiSpec)
   end
 
   # Ethereum API Version 1
@@ -24,6 +25,11 @@ defmodule BeaconApi.Router do
     end
   end
 
+  scope "/api" do
+    pipe_through(:api)
+    get("/openapi", OpenApiSpex.Plug.RenderSpec, [])
+  end
+
   # Catch-all route outside of any scope
   match(:*, "/*path", BeaconApi.ErrorController, :not_found)
 end
diff --git a/lib/chain_spec/configs/gen_config.ex b/lib/chain_spec/configs/gen_config.ex
new file mode 100644
index 000000000..4ac441250
--- /dev/null
+++ b/lib/chain_spec/configs/gen_config.ex
@@ -0,0 +1,35 @@
+defmodule ChainSpec.GenConfig do
+  @moduledoc """
+  Generic config behaviour, for auto-implementing configs.
+  """
+
+  defmacro __using__(opts) do
+    file = Keyword.fetch!(opts, :file)
+    config = ConfigUtils.load_config_from_file!(file)
+    preset = Map.fetch!(config, "PRESET_BASE") |> parse_preset()
+
+    quote do
+      file = unquote(file)
+      config = unquote(Macro.escape(config))
+      preset = unquote(preset)
+
+      @external_resource file
+      @__parsed_config config
+      @__unified Map.merge(preset.get_preset(), @__parsed_config)
+
+      @behaviour unquote(__MODULE__)
+
+      @impl unquote(__MODULE__)
+      def get(key), do: Map.fetch!(@__unified, key)
+    end
+  end
+
+  defp parse_preset("mainnet"), do: MainnetPreset
+  defp parse_preset("minimal"), do: MinimalPreset
+  defp parse_preset(other), do: raise("Unknown preset: #{other}")
+
+  @doc """
+  Fetches a value from config.
+ """ + @callback get(String.t()) :: term() +end diff --git a/lib/chain_spec/configs/mainnet.ex b/lib/chain_spec/configs/mainnet.ex index 9d1cdf265..8475f30e2 100644 --- a/lib/chain_spec/configs/mainnet.ex +++ b/lib/chain_spec/configs/mainnet.ex @@ -2,12 +2,5 @@ defmodule MainnetConfig do @moduledoc """ Mainnet config constants. """ - file = "config/networks/mainnet/config.yaml" - - @external_resource file - - @parsed_config ConfigUtils.load_config_from_file!(file) - @unified Map.merge(MainnetPreset.get_preset(), @parsed_config) - - def get(key), do: Map.fetch!(@unified, key) + use ChainSpec.GenConfig, file: "config/networks/mainnet/config.yaml" end diff --git a/lib/chain_spec/configs/minimal.ex b/lib/chain_spec/configs/minimal.ex index 2348c2ace..71d089dfe 100644 --- a/lib/chain_spec/configs/minimal.ex +++ b/lib/chain_spec/configs/minimal.ex @@ -2,12 +2,5 @@ defmodule MinimalConfig do @moduledoc """ Minimal config constants. """ - file = "config/networks/minimal/config.yaml" - - @external_resource file - - @parsed_config ConfigUtils.load_config_from_file!(file) - @unified Map.merge(MinimalPreset.get_preset(), @parsed_config) - - def get(key), do: Map.fetch!(@unified, key) + use ChainSpec.GenConfig, file: "config/networks/minimal/config.yaml" end diff --git a/lib/chain_spec/configs/sepolia.ex b/lib/chain_spec/configs/sepolia.ex index 10cf1f8d9..3c716d335 100644 --- a/lib/chain_spec/configs/sepolia.ex +++ b/lib/chain_spec/configs/sepolia.ex @@ -2,12 +2,5 @@ defmodule SepoliaConfig do @moduledoc """ Sepolia config constants. """ - file = "config/networks/sepolia/config.yaml" - - @external_resource file - - @parsed_config ConfigUtils.load_config_from_file!(file) - @unified Map.merge(MainnetPreset.get_preset(), @parsed_config) - - def get(key), do: Map.fetch!(@unified, key) + use ChainSpec.GenConfig, file: "config/networks/sepolia/config.yaml" end diff --git a/lib/chain_spec/presets/gen_preset.ex b/lib/chain_spec/presets/gen_preset.ex new file mode 100644 index 000000000..22abb16eb --- /dev/null +++ b/lib/chain_spec/presets/gen_preset.ex @@ -0,0 +1,27 @@ +defmodule ChainSpec.GenPreset do + @moduledoc """ + Generic preset behaviour, for auto-implementing presets. + """ + + defmacro __using__(opts) do + file = Keyword.fetch!(opts, :file) + + quote do + file = unquote(file) + + @external_resource file + + @__parsed_preset ConfigUtils.load_preset_from_dir!(file) + + @behaviour unquote(__MODULE__) + + @impl unquote(__MODULE__) + def get_preset, do: @__parsed_preset + end + end + + @doc """ + Fetches the whole preset. + """ + @callback get_preset() :: map() +end diff --git a/lib/chain_spec/presets/mainnet.ex b/lib/chain_spec/presets/mainnet.ex index e83f7d8ba..c536d2109 100644 --- a/lib/chain_spec/presets/mainnet.ex +++ b/lib/chain_spec/presets/mainnet.ex @@ -2,11 +2,5 @@ defmodule MainnetPreset do @moduledoc """ Mainnet preset constants. """ - - file = "config/presets/mainnet" - @external_resource file - - @parsed_preset ConfigUtils.load_preset_from_dir!(file) - - def get_preset, do: @parsed_preset + use ChainSpec.GenPreset, file: "config/presets/mainnet" end diff --git a/lib/chain_spec/presets/minimal.ex b/lib/chain_spec/presets/minimal.ex index 2a5352276..24df2d39a 100644 --- a/lib/chain_spec/presets/minimal.ex +++ b/lib/chain_spec/presets/minimal.ex @@ -2,11 +2,5 @@ defmodule MinimalPreset do @moduledoc """ Minimal preset constants. 
""" - - file = "config/presets/minimal" - @external_resource file - - @parsed_preset ConfigUtils.load_preset_from_dir!(file) - - def get_preset, do: @parsed_preset + use ChainSpec.GenPreset, file: "config/presets/minimal" end diff --git a/lib/lambda_ethereum_consensus/beacon/beacon_chain.ex b/lib/lambda_ethereum_consensus/beacon/beacon_chain.ex index a9e3ce6de..24727f8be 100644 --- a/lib/lambda_ethereum_consensus/beacon/beacon_chain.ex +++ b/lib/lambda_ethereum_consensus/beacon/beacon_chain.ex @@ -13,13 +13,20 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconChain do defstruct [ :genesis_time, :genesis_validators_root, - :time + :time, + :cached_fork_choice ] @type t :: %__MODULE__{ genesis_time: Types.uint64(), genesis_validators_root: Types.bytes32(), - time: Types.uint64() + time: Types.uint64(), + cached_fork_choice: %{ + head_root: Types.root(), + head_slot: Types.slot(), + finalized_root: Types.root(), + finalized_epoch: Types.epoch() + } } end @@ -33,6 +40,14 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconChain do GenServer.call(__MODULE__, :get_current_slot) end + @spec update_fork_choice_cache(Types.root(), Types.slot(), Types.root(), Types.epoch()) :: :ok + def update_fork_choice_cache(head_root, head_slot, finalized_root, finalized_epoch) do + GenServer.cast( + __MODULE__, + {:update_fork_choice_cache, head_root, head_slot, finalized_root, finalized_epoch} + ) + end + @spec get_current_epoch() :: integer() def get_current_epoch do Misc.compute_epoch_at_slot(get_current_slot()) @@ -50,20 +65,8 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconChain do @spec get_current_status_message() :: {:ok, Types.StatusMessage.t()} | {:error, any} def get_current_status_message do - # TODO: un-hardcode when get_head is optimized and/or cached - # GenServer.call(__MODULE__, :get_current_status_message, @default_timeout) - - # hardcoded response from random peer - {:ok, - %Types.StatusMessage{ - fork_digest: get_fork_digest(), - finalized_root: - Base.decode16!("7715794499C07D9954DD223EC2C6B846D3BAB27956D093000FADC1B8219F74D4"), - finalized_epoch: 228_168, - head_root: - Base.decode16!("D62A74AE0F933224133C5E6E1827A2835A1E705F0CDFEE3AD25808DDEA5572DB"), - head_slot: 7_301_450 - }} + status_message = GenServer.call(__MODULE__, :get_current_status_message) + {:ok, status_message} end ########################## @@ -79,6 +82,12 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconChain do %BeaconChainState{ genesis_time: anchor_state.genesis_time, genesis_validators_root: anchor_state.genesis_validators_root, + cached_fork_choice: %{ + head_root: <<0::256>>, + head_slot: anchor_state.slot, + finalized_root: anchor_state.finalized_checkpoint.root, + finalized_epoch: anchor_state.finalized_checkpoint.epoch + }, time: time }} end @@ -90,23 +99,38 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconChain do @impl true def handle_call({:get_fork_digest, slot}, _from, state) do - current_fork_version = + fork_digest = case slot do nil -> compute_current_slot(state) _ -> slot end - |> Misc.compute_epoch_at_slot() - |> ChainSpec.get_fork_version_for_epoch() - - fork_digest = - Misc.compute_fork_digest( - current_fork_version, - state.genesis_validators_root - ) + |> compute_fork_digest(state.genesis_validators_root) {:reply, fork_digest, state} end + @impl true + @spec handle_call(:get_current_status_message, any, BeaconChainState.t()) :: + {:reply, Types.StatusMessage.t(), BeaconChainState.t()} + def handle_call(:get_current_status_message, _from, state) do + %{ + head_root: head_root, + head_slot: 
head_slot, + finalized_root: finalized_root, + finalized_epoch: finalized_epoch + } = state.cached_fork_choice + + status_message = %Types.StatusMessage{ + fork_digest: compute_fork_digest(head_slot, state.genesis_validators_root), + finalized_root: finalized_root, + finalized_epoch: finalized_epoch, + head_root: head_root, + head_slot: head_slot + } + + {:reply, status_message, state} + end + @impl true def handle_info(:on_tick, state) do schedule_next_tick() @@ -118,6 +142,21 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconChain do {:noreply, %BeaconChainState{state | time: time}} end + @impl true + def handle_cast( + {:update_fork_choice_cache, head_root, head_slot, finalized_root, finalized_epoch}, + state + ) do + {:noreply, + state + |> Map.put(:cached_fork_choice, %{ + head_root: head_root, + head_slot: head_slot, + finalized_root: finalized_root, + finalized_epoch: finalized_epoch + })} + end + def schedule_next_tick do # For millisecond precision time_to_next_tick = 1000 - rem(:os.system_time(:millisecond), 1000) @@ -127,4 +166,14 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconChain do defp compute_current_slot(state) do div(state.time - state.genesis_time, ChainSpec.get("SECONDS_PER_SLOT")) end + + defp compute_fork_digest(slot, genesis_validators_root) do + current_fork_version = + slot |> Misc.compute_epoch_at_slot() |> ChainSpec.get_fork_version_for_epoch() + + Misc.compute_fork_digest( + current_fork_version, + genesis_validators_root + ) + end end diff --git a/lib/lambda_ethereum_consensus/execution/engine_api.ex b/lib/lambda_ethereum_consensus/execution/engine_api.ex index 7de43c5e8..d0626dda9 100644 --- a/lib/lambda_ethereum_consensus/execution/engine_api.ex +++ b/lib/lambda_ethereum_consensus/execution/engine_api.ex @@ -10,19 +10,20 @@ defmodule LambdaEthereumConsensus.Execution.EngineApi do @spec exchange_capabilities() :: {:ok, any} | {:error, any} def exchange_capabilities, do: impl().exchange_capabilities() - @spec new_payload_v1(Types.ExecutionPayload.t()) :: + @spec new_payload(Types.ExecutionPayload.t()) :: {:ok, any} | {:error, any} - def new_payload_v1(execution_payload), do: impl().new_payload_v1(execution_payload) + def new_payload(execution_payload), do: impl().new_payload(execution_payload) @spec forkchoice_updated(map, map | any) :: {:ok, any} | {:error, any} def forkchoice_updated(forkchoice_state, payload_attributes), do: impl().forkchoice_updated(forkchoice_state, payload_attributes) - defp impl, - do: - Application.get_env( - __MODULE__, - :implementation, - LambdaEthereumConsensus.Execution.EngineApi.Api - ) + defp impl do + Application.fetch_env!( + :lambda_ethereum_consensus, + __MODULE__ + )[ + :implementation + ] + end end diff --git a/lib/lambda_ethereum_consensus/execution/engine_api/api.ex b/lib/lambda_ethereum_consensus/execution/engine_api/api.ex index b9460e519..ed3d9de0d 100644 --- a/lib/lambda_ethereum_consensus/execution/engine_api/api.ex +++ b/lib/lambda_ethereum_consensus/execution/engine_api/api.ex @@ -17,21 +17,18 @@ defmodule LambdaEthereumConsensus.Execution.EngineApi.Api do call("engine_exchangeCapabilities", [@supported_methods]) end - @spec new_payload_v1(Types.ExecutionPayload.t()) :: + @spec new_payload(Types.ExecutionPayload.t()) :: {:ok, any} | {:error, any} - def new_payload_v1(execution_payload) do - call("engine_newPayloadV2", [execution_payload]) + def new_payload(execution_payload) do + call("engine_newPayloadV2", [RPC.normalize(execution_payload)]) end @spec forkchoice_updated(map, map | any) :: {:ok, any} | {:error, 
any} def forkchoice_updated(forkchoice_state, payload_attributes) do - forkchoice_state = - forkchoice_state - |> Map.update!("finalizedBlockHash", &RPC.encode_binary/1) - |> Map.update!("headBlockHash", &RPC.encode_binary/1) - |> Map.update!("safeBlockHash", &RPC.encode_binary/1) - - call("engine_forkchoiceUpdatedV2", [forkchoice_state, payload_attributes]) + call("engine_forkchoiceUpdatedV2", [ + RPC.normalize(forkchoice_state), + RPC.normalize(payload_attributes) + ]) end defp call(method, params) do diff --git a/lib/lambda_ethereum_consensus/execution/engine_api/mocked.ex b/lib/lambda_ethereum_consensus/execution/engine_api/mocked.ex index fc7d71de0..b07f05ce1 100644 --- a/lib/lambda_ethereum_consensus/execution/engine_api/mocked.ex +++ b/lib/lambda_ethereum_consensus/execution/engine_api/mocked.ex @@ -12,42 +12,14 @@ defmodule LambdaEthereumConsensus.Execution.EngineApi.Mocked do {:ok, ["engine_newPayloadV2"]} end - @spec new_payload_v1(Types.ExecutionPayload.t()) :: + @spec new_payload(Types.ExecutionPayload.t()) :: {:ok, any} | {:error, any} - def new_payload_v1(_execution_payload) do - {:ok, generic_response()} + def new_payload(_execution_payload) do + {:ok, %{"status" => "SYNCING"}} end @spec forkchoice_updated(map, map | any) :: {:ok, any} | {:error, any} def forkchoice_updated(_forkchoice_state, _payload_attributes) do - {:ok, generic_response()} + {:ok, %{"payload_id" => nil, payload_status: %{"status" => "SYNCING"}}} end - - defp generic_response do - %{ - id: 1, - jsonrpc: "2.0", - result: %{ - payloadId: nil, - payloadStatus: %{ - status: "VALID", - latestValidHash: nil, - validationError: nil - } - }, - error: "" - } - end - - # # This will be used for logging - # defp mock_call(method, params) do - # config = - # Application.fetch_env!( - # :lambda_ethereum_consensus, - # LambdaEthereumConsensus.Execution.EngineApi - # ) - - # endpoint = Keyword.fetch!(config, :endpoint) - # version = Keyword.fetch!(config, :version) - # end end diff --git a/lib/lambda_ethereum_consensus/execution/execution_client.ex b/lib/lambda_ethereum_consensus/execution/execution_client.ex index d780c4657..3b6d9e8d7 100644 --- a/lib/lambda_ethereum_consensus/execution/execution_client.ex +++ b/lib/lambda_ethereum_consensus/execution/execution_client.ex @@ -8,10 +8,24 @@ defmodule LambdaEthereumConsensus.Execution.ExecutionClient do @doc """ Verifies the validity of the data contained in the new payload and notifies the Execution client of a new payload """ - @spec verify_and_notify_new_payload(Types.ExecutionPayload.t()) :: {:ok, any} | {:error, any} - def verify_and_notify_new_payload(_execution_payload) do - # TODO: call engine api - {:ok, true} + @spec verify_and_notify_new_payload(Types.ExecutionPayload.t()) :: + {:ok, :optimistic | :valid | :invalid} | {:error, String.t()} + def verify_and_notify_new_payload(execution_payload) do + result = EngineApi.new_payload(execution_payload) + + case result do + {:ok, %{"status" => "SYNCING"}} -> + {:ok, :optimistic} + + {:ok, %{"status" => "VALID"}} -> + {:ok, :valid} + + {:ok, %{"status" => "INVALID"}} -> + {:ok, :invalid} + + {:error, error} -> + {:error, error} + end end @doc """ @@ -29,9 +43,9 @@ defmodule LambdaEthereumConsensus.Execution.ExecutionClient do {:ok, any} | {:error, any} def notify_forkchoice_updated(head_block_hash, safe_block_hash, finalized_block_hash) do fork_choice_state = %{ - finalizedBlockHash: finalized_block_hash, - headBlockHash: head_block_hash, - safeBlockHash: safe_block_hash + finalized_block_hash: finalized_block_hash, + 
head_block_hash: head_block_hash, + safe_block_hash: safe_block_hash } EngineApi.forkchoice_updated(fork_choice_state, nil) diff --git a/lib/lambda_ethereum_consensus/execution/rpc.ex b/lib/lambda_ethereum_consensus/execution/rpc.ex index f4eea6034..c4991b585 100644 --- a/lib/lambda_ethereum_consensus/execution/rpc.ex +++ b/lib/lambda_ethereum_consensus/execution/rpc.ex @@ -30,12 +30,59 @@ defmodule LambdaEthereumConsensus.Execution.RPC do if Map.has_key?(result.body, "error") do {:error, result.body["error"]["message"]} else - {:ok, result.body["result"]} + {:ok, result.body["result"] |> normalize_response()} end end + def normalize(nil), do: nil + + def normalize(payload) when is_struct(payload) do + normalize(Map.from_struct(payload)) + end + + def normalize(payload) when is_map(payload) do + Enum.reduce(payload, %{}, fn {k, v}, acc -> + Map.put(acc, to_camel_case(k), normalize(v)) + end) + end + + def normalize(payload) when is_list(payload) do + Enum.map(payload, &normalize/1) + end + + def normalize(payload) when is_binary(payload) do + encode_binary(payload) + end + + def normalize(payload) when is_integer(payload) do + payload |> encode_integer() + end + + def normalize_response(response) when is_map(response) do + Enum.reduce(response, %{}, fn {k, v}, acc -> + Map.put(acc, Recase.to_snake(k), v) + end) + end + + def normalize_response(response) do + response + end + @spec encode_binary(binary) :: binary def encode_binary(binary) do "0x" <> Base.encode16(binary, case: :lower) end + + def encode_integer(integer) do + "0x" <> Integer.to_string(integer, 16) + end + + defp to_camel_case(key) when is_atom(key) do + Atom.to_string(key) |> to_camel_case() + end + + defp to_camel_case(key) when is_binary(key) do + key + |> Recase.to_camel() + end end diff --git a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex index 831a6cc2b..28149d284 100644 --- a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex +++ b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex @@ -6,6 +6,8 @@ defmodule LambdaEthereumConsensus.ForkChoice do use GenServer require Logger + alias LambdaEthereumConsensus.Beacon.BeaconChain + alias LambdaEthereumConsensus.Execution.ExecutionClient alias LambdaEthereumConsensus.ForkChoice.{Handlers, Helpers} alias Types.Attestation alias Types.BeaconState @@ -38,8 +40,7 @@ defmodule LambdaEthereumConsensus.ForkChoice do @spec has_block?(Types.root()) :: boolean() def has_block?(block_root) do - block = get_block(block_root) - block != nil + GenServer.call(__MODULE__, {:has_block?, block_root}, @default_timeout) end @spec on_tick(Types.uint64()) :: :ok @@ -70,7 +71,7 @@ defmodule LambdaEthereumConsensus.ForkChoice do @spec init({BeaconState.t(), SignedBeaconBlock.t(), Types.uint64()}) :: {:ok, Store.t()} | {:stop, any} def init({anchor_state = %BeaconState{}, signed_anchor_block = %SignedBeaconBlock{}, time}) do - case Helpers.get_forkchoice_store(anchor_state, signed_anchor_block, true) do + case Store.get_forkchoice_store(anchor_state, signed_anchor_block, true) do {:ok, %Store{} = store} -> Logger.info("[Fork choice] Initialized store.") @@ -96,8 +97,8 @@ defmodule LambdaEthereumConsensus.ForkChoice do {:reply, Helpers.current_status_message(state), state} end - def handle_call({:get_block, block_root}, _from, state) do - {:reply, Store.get_block(state, block_root), state} + def handle_call({:has_block?, block_root}, _from, state) do + {:reply, Store.has_block?(state, block_root), state} end @impl 
GenServer
@@ -113,6 +114,8 @@ defmodule LambdaEthereumConsensus.ForkChoice do
       {:ok, new_store} ->
         :telemetry.execute([:sync, :on_block], %{slot: slot})
         Logger.info("[Fork choice] Block #{slot} added to the store.")
+
+        Task.async(__MODULE__, :recompute_head, [new_store])
         {:reply, :ok, new_store}
 
       {:error, reason} ->
@@ -158,15 +161,15 @@
     {:noreply, new_store}
   end
 
+  @impl GenServer
+  def handle_info(_msg, state) do
+    {:noreply, state}
+  end
+
   ##########################
   ### Private Functions
   ##########################
 
-  @spec get_block(Types.root()) :: Types.SignedBeaconBlock.t() | nil
-  def get_block(block_root) do
-    GenServer.call(__MODULE__, {:get_block, block_root}, @default_timeout)
-  end
-
   @spec get_store_attrs([atom()]) :: [any()]
   defp get_store_attrs(attrs) do
     GenServer.call(__MODULE__, {:get_store_attrs, attrs}, @default_timeout)
@@ -191,7 +194,36 @@ defmodule LambdaEthereumConsensus.ForkChoice do
          {:ok, new_store} <-
            signed_block.message.body.attester_slashings
            |> apply_handler(new_store, &Handlers.on_attester_slashing/2) do
-      {:ok, new_store}
+      {:ok, Handlers.prune_checkpoint_states(new_store)}
     end
   end
+
+  @spec recompute_head(Types.Store.t()) :: :ok
+  def recompute_head(store) do
+    {:ok, head_root} = Helpers.get_head(store)
+
+    head_block = Store.get_block!(store, head_root)
+    head_execution_hash = head_block.body.execution_payload.block_hash
+
+    finalized_checkpoint = store.finalized_checkpoint
+    finalized_block = Store.get_block!(store, store.finalized_checkpoint.root)
+    finalized_execution_hash = finalized_block.body.execution_payload.block_hash
+
+    # TODO: do something with the result from the execution client
+    # TODO: compute safe block hash
+    ExecutionClient.notify_forkchoice_updated(
+      head_execution_hash,
+      finalized_execution_hash,
+      finalized_execution_hash
+    )
+
+    BeaconChain.update_fork_choice_cache(
+      head_root,
+      head_block.slot,
+      finalized_checkpoint.root,
+      finalized_checkpoint.epoch
+    )
+
+    :ok
+  end
 end
diff --git a/lib/lambda_ethereum_consensus/fork_choice/handlers.ex b/lib/lambda_ethereum_consensus/fork_choice/handlers.ex
index cf99898d4..56679ac64 100644
--- a/lib/lambda_ethereum_consensus/fork_choice/handlers.ex
+++ b/lib/lambda_ethereum_consensus/fork_choice/handlers.ex
@@ -360,6 +360,14 @@ defmodule LambdaEthereumConsensus.ForkChoice.Handlers do
     end
   end
 
+  def prune_checkpoint_states(%Store{checkpoint_states: checkpoint_states} = store) do
+    finalized_epoch = store.finalized_checkpoint.epoch
+
+    checkpoint_states
+    |> Map.reject(fn {%{epoch: epoch}, _} -> epoch < finalized_epoch end)
+    |> then(&%{store | checkpoint_states: &1})
+  end
+
   def update_latest_messages(%Store{} = store, attesting_indices, %Attestation{data: data}) do
     %AttestationData{target: target, beacon_block_root: beacon_block_root} = data
     messages = store.latest_messages
diff --git a/lib/lambda_ethereum_consensus/fork_choice/helpers.ex b/lib/lambda_ethereum_consensus/fork_choice/helpers.ex
index a7c8e4e79..8d8ba7c35 100644
--- a/lib/lambda_ethereum_consensus/fork_choice/helpers.ex
+++ b/lib/lambda_ethereum_consensus/fork_choice/helpers.ex
@@ -6,9 +6,6 @@ defmodule 
LambdaEthereumConsensus.ForkChoice.Helpers do end end - @spec get_forkchoice_store(BeaconState.t(), SignedBeaconBlock.t(), boolean()) :: - {:ok, Store.t()} | {:error, String.t()} - def get_forkchoice_store( - %BeaconState{} = anchor_state, - %SignedBeaconBlock{message: anchor_block} = signed_block, - use_db - ) do - anchor_state_root = Ssz.hash_tree_root!(anchor_state) - anchor_block_root = Ssz.hash_tree_root!(anchor_block) - - if anchor_block.state_root == anchor_state_root do - anchor_epoch = Accessors.get_current_epoch(anchor_state) - - anchor_checkpoint = %Checkpoint{ - epoch: anchor_epoch, - root: anchor_block_root - } - - time = anchor_state.genesis_time + ChainSpec.get("SECONDS_PER_SLOT") * anchor_state.slot - - %Store{ - time: time, - genesis_time: anchor_state.genesis_time, - justified_checkpoint: anchor_checkpoint, - finalized_checkpoint: anchor_checkpoint, - unrealized_justified_checkpoint: anchor_checkpoint, - unrealized_finalized_checkpoint: anchor_checkpoint, - proposer_boost_root: <<0::256>>, - equivocating_indices: MapSet.new(), - blocks: %{}, - block_states: %{}, - checkpoint_states: %{anchor_checkpoint => anchor_state}, - latest_messages: %{}, - unrealized_justifications: %{anchor_block_root => anchor_checkpoint} - } - |> then(&if use_db, do: &1 |> Map.delete(:blocks) |> Map.delete(:block_states), else: &1) - |> Store.store_block(anchor_block_root, signed_block) - |> Store.store_state(anchor_block_root, anchor_state) - |> then(&{:ok, &1}) - else - {:error, "Anchor block state root does not match anchor state root"} - end - end - @spec get_head(Store.t()) :: {:ok, Types.root()} | {:error, any} def get_head(%Store{} = store) do # Get filtered block tree that only includes viable branches @@ -140,10 +93,7 @@ defmodule LambdaEthereumConsensus.ForkChoice.Helpers do end defp filter_block_tree(%Store{} = store, block_root, block, blocks) do - # TODO: this is highly inefficient. We should move to `ForkChoice.Tree` ASAP - children = - Store.get_blocks(store) - |> Enum.filter(fn {_, block} -> block.parent_root == block_root end) + children = Store.get_children(store, block_root) # If any children branches contain expected finalized/justified checkpoints, # add to filtered block-tree and signal viability to parent. diff --git a/lib/lambda_ethereum_consensus/fork_choice/tree.ex b/lib/lambda_ethereum_consensus/fork_choice/old_tree.ex similarity index 94% rename from lib/lambda_ethereum_consensus/fork_choice/tree.ex rename to lib/lambda_ethereum_consensus/fork_choice/old_tree.ex index 2b006d274..b31ecd38a 100644 --- a/lib/lambda_ethereum_consensus/fork_choice/tree.ex +++ b/lib/lambda_ethereum_consensus/fork_choice/old_tree.ex @@ -12,9 +12,7 @@ defmodule LambdaEthereumConsensus.ForkChoice.Tree do When requesting the head, a cached value will be returned instantly, according to the last calculated one. """ - use GenServer - require Logger defmodule Node do @moduledoc """ @@ -26,13 +24,13 @@ defmodule LambdaEthereumConsensus.ForkChoice.Tree do by the tree so manually assignment is not necessary. 
""" defstruct [:parent_id, :id, :children_ids, :self_weight, :subtree_weight] - @type id :: String.t() + @type id :: Types.root() @type t :: %Node{ parent_id: id | :root, id: id, children_ids: [id], - self_weight: integer(), - subtree_weight: integer() + self_weight: non_neg_integer(), + subtree_weight: non_neg_integer() } end @@ -43,7 +41,7 @@ defmodule LambdaEthereumConsensus.ForkChoice.Tree do ### Public API ########################## - @spec start_link(any) :: :ignore | {:error, any} | {:ok, pid} + @spec start_link(any()) :: GenServer.on_start() def start_link(_), do: GenServer.start_link(__MODULE__, [], name: __MODULE__) @doc """ @@ -61,7 +59,7 @@ defmodule LambdaEthereumConsensus.ForkChoice.Tree do Gets the head node according to LMD GHOST. The values are pre-calculated when adding nodes, so this operation is O(1). """ - @spec get_head :: Node.t() + @spec get_head() :: Node.t() def get_head, do: GenServer.call(__MODULE__, :get_head) ########################## @@ -69,7 +67,7 @@ defmodule LambdaEthereumConsensus.ForkChoice.Tree do ########################## @impl GenServer - @spec init(any) :: {:ok, status()} + @spec init(any()) :: {:ok, status()} def init(_), do: {:ok, %{root: nil, tree: %{}, head: nil}} @impl GenServer diff --git a/lib/lambda_ethereum_consensus/fork_choice/simple_tree.ex b/lib/lambda_ethereum_consensus/fork_choice/simple_tree.ex new file mode 100644 index 000000000..22a16a227 --- /dev/null +++ b/lib/lambda_ethereum_consensus/fork_choice/simple_tree.ex @@ -0,0 +1,121 @@ +defmodule LambdaEthereumConsensus.ForkChoice.Simple.Tree do + @moduledoc false + + defmodule Node do + @moduledoc false + defstruct [:parent_id, :id, :children_ids] + @type id :: Types.root() + @type parent_id :: id() | :root + @type t :: %__MODULE__{ + parent_id: parent_id(), + children_ids: [id] + } + end + + @enforce_keys [:root, :nodes] + defstruct [:root, :nodes] + + @type t() :: %__MODULE__{root: Node.id(), nodes: %{Node.id() => Node.t()}} + + ########################## + ### Public API + ########################## + + @spec new(Node.id()) :: t() + def new(root) when is_binary(root) do + root_node = %Node{parent_id: :root, children_ids: []} + %__MODULE__{root: root, nodes: %{root => root_node}} + end + + @spec add_block(t(), Node.id(), Node.id()) :: {:ok, t()} | {:error, :not_found} + def add_block(%__MODULE__{} = tree, block_root, parent_root) + when is_binary(block_root) and is_binary(parent_root) do + node = %Node{ + parent_id: parent_root, + children_ids: [] + } + + with {:ok, new_nodes} <- add_node_to_tree(tree.nodes, block_root, node) do + {:ok, %{tree | nodes: new_nodes}} + end + end + + @spec add_block!(t(), Node.id(), Node.id()) :: t() + def add_block!(tree, block_root, parent_root) do + case add_block(tree, block_root, parent_root) do + {:error, :not_found} -> raise "Parent #{Base.encode16(parent_root)} not found in tree" + {:ok, new_tree} -> new_tree + end + end + + @spec update_root(t(), Node.id()) :: {:ok, t()} | {:error, :not_found} + def update_root(%__MODULE__{root: root} = tree, root), do: {:ok, tree} + + def update_root(%__MODULE__{nodes: nodes}, new_root) do + case Map.get(nodes, new_root) do + nil -> + {:error, :not_found} + + node -> + get_subtree(nodes, new_root, %{node | parent_id: :root}) + |> then(&{:ok, %__MODULE__{root: new_root, nodes: &1}}) + end + end + + @spec update_root!(t(), Node.id()) :: t() + def update_root!(tree, new_root) do + case update_root(tree, new_root) do + {:error, :not_found} -> raise "Root #{Base.encode16(new_root)} not found in tree" + {:ok, 
new_tree} -> new_tree + end + end + + @spec get_children(t(), Node.id()) :: {:ok, [Node.id()]} | {:error, :not_found} + def get_children(%__MODULE__{nodes: nodes}, parent_id) do + case Map.get(nodes, parent_id) do + nil -> {:error, :not_found} + %{children_ids: ids} -> {:ok, ids} + end + end + + @spec get_children!(t(), Node.id()) :: [Node.id()] + def get_children!(tree, parent_id) do + case get_children(tree, parent_id) do + {:error, :not_found} -> raise "Parent #{Base.encode16(parent_id)} not found in tree" + {:ok, res} -> res + end + end + + @spec has_block?(t(), Node.id()) :: boolean() + def has_block?(tree, block_root), do: Map.has_key?(tree.nodes, block_root) + + ########################## + ### Private Functions + ########################## + + defp add_node_to_tree(nodes, block_root, %Node{parent_id: parent_id} = node) do + case Map.get(nodes, parent_id) do + nil -> + {:error, :not_found} + + parent -> + nodes + |> Map.put(block_root, node) + |> Map.replace!(parent_id, %{parent | children_ids: [block_root | parent.children_ids]}) + |> then(&{:ok, &1}) + end + end + + # Just for being explicit + defp get_subtree(_, id, %{children_ids: []} = node), do: %{id => node} + + defp get_subtree(nodes, id, node) do + node.children_ids + |> Enum.reduce(%{id => node}, fn child_id, acc -> + child = Map.fetch!(nodes, child_id) + + get_subtree(nodes, child_id, child) + |> Map.merge(acc) + end) + end +end diff --git a/lib/lambda_ethereum_consensus/state_transition/operations.ex b/lib/lambda_ethereum_consensus/state_transition/operations.ex index 3cb4263c9..6be9c9de1 100644 --- a/lib/lambda_ethereum_consensus/state_transition/operations.ex +++ b/lib/lambda_ethereum_consensus/state_transition/operations.ex @@ -234,13 +234,12 @@ defmodule LambdaEthereumConsensus.StateTransition.Operations do payload.timestamp != Misc.compute_timestamp_at_slot(state, state.slot) -> {:error, "Timestamp verification failed"} - # Verify the execution payload is valid if not mocked - verify_and_notify_new_payload.(payload) != {:ok, true} -> - {:error, "Invalid execution payload"} - # Cache execution payload header true -> - with {:ok, transactions_root} <- + # TODO: store execution status in block db + with {:ok, _status} <- + verify_and_notify_new_payload.(payload) |> handle_verify_payload_result(), + {:ok, transactions_root} <- Ssz.hash_list_tree_root_typed( payload.transactions, ChainSpec.get("MAX_TRANSACTIONS_PER_PAYLOAD"), @@ -952,4 +951,11 @@ defmodule LambdaEthereumConsensus.StateTransition.Operations do {:error, "deposits length mismatch"} end end + + defp handle_verify_payload_result({:ok, :valid = status}), do: {:ok, status} + defp handle_verify_payload_result({:ok, :optimistic = status}), do: {:ok, status} + defp handle_verify_payload_result({:ok, :invalid}), do: {:error, "Invalid execution payload"} + + defp handle_verify_payload_result({:error, error}), + do: {:error, "Error when calling execution client: #{error}"} end diff --git a/lib/lambda_ethereum_consensus/store/blocks.ex b/lib/lambda_ethereum_consensus/store/blocks.ex index f2b844ef0..52ce39d10 100644 --- a/lib/lambda_ethereum_consensus/store/blocks.ex +++ b/lib/lambda_ethereum_consensus/store/blocks.ex @@ -1,24 +1,31 @@ defmodule LambdaEthereumConsensus.Store.Blocks do @moduledoc false alias LambdaEthereumConsensus.Store.BlockStore + alias Types.SignedBeaconBlock use GenServer - @ets_block_by_hash __MODULE__ + @ets_block_by_hash :blocks_by_hash + @ets_ttl_data :"#{@ets_block_by_hash}_ttl_data" + @max_blocks 512 + @batch_prune_size 32 
########################## ### Public API ########################## + @spec start_link(any()) :: GenServer.on_start() def start_link(_opts) do GenServer.start_link(__MODULE__, [], name: __MODULE__) end + @spec store_block(Types.root(), SignedBeaconBlock.t()) :: :ok def store_block(block_root, signed_block) do cache_block(block_root, signed_block) GenServer.cast(__MODULE__, {:store_block, block_root, signed_block}) end + @spec get_block(Types.root()) :: SignedBeaconBlock.t() | nil def get_block(block_root), do: lookup(block_root) @spec clear() :: any() @@ -30,6 +37,15 @@ defmodule LambdaEthereumConsensus.Store.Blocks do @impl GenServer def init(_) do + :ets.new(@ets_ttl_data, [ + :ordered_set, + :private, + :named_table, + read_concurrency: false, + write_concurrency: false, + decentralized_counters: false + ]) + :ets.new(@ets_block_by_hash, [:set, :public, :named_table]) {:ok, nil} end @@ -37,7 +53,13 @@ defmodule LambdaEthereumConsensus.Store.Blocks do @impl GenServer def handle_cast({:store_block, block_root, signed_block}, state) do BlockStore.store_block(signed_block, block_root) - # TODO: remove old blocks from cache + handle_cast({:touch_entry, block_root}, state) + end + + @impl GenServer + def handle_cast({:touch_entry, block_root}, state) do + update_ttl(block_root) + prune_cache() {:noreply, state} end @@ -47,8 +69,12 @@ defmodule LambdaEthereumConsensus.Store.Blocks do defp lookup(block_root) do case :ets.lookup_element(@ets_block_by_hash, block_root, 2, nil) do - nil -> cache_miss(block_root) - block -> block + nil -> + cache_miss(block_root) + + block -> + GenServer.cast(__MODULE__, {:touch_entry, block_root}) + block end end @@ -69,7 +95,36 @@ defmodule LambdaEthereumConsensus.Store.Blocks do end defp cache_block(block_root, signed_block) do - :ets.insert_new(@ets_block_by_hash, {block_root, signed_block}) + :ets.insert_new(@ets_block_by_hash, {block_root, signed_block, nil}) + GenServer.cast(__MODULE__, {:touch_entry, block_root}) signed_block end + + defp update_ttl(block_root) do + delete_ttl(block_root) + uniq = :erlang.unique_integer([:monotonic]) + :ets.insert_new(@ets_ttl_data, {uniq, block_root}) + end + + defp delete_ttl(block_root) do + case :ets.lookup_element(@ets_block_by_hash, block_root, 3, nil) do + nil -> nil + uniq -> :ets.delete(@ets_ttl_data, uniq) + end + end + + defp prune_cache do + to_prune = :ets.info(@ets_block_by_hash, :size) - @max_blocks + + if to_prune > 0 do + {elems, _cont} = + :ets.select(@ets_ttl_data, [{:_, [], [:"$_"]}], to_prune + @batch_prune_size) + + elems + |> Enum.each(fn {uniq, root} -> + :ets.delete(@ets_ttl_data, uniq) + :ets.delete(@ets_block_by_hash, root) + end) + end + end end diff --git a/lib/lambda_ethereum_consensus/telemetry.ex b/lib/lambda_ethereum_consensus/telemetry.ex index f15070859..4a9f1878f 100644 --- a/lib/lambda_ethereum_consensus/telemetry.ex +++ b/lib/lambda_ethereum_consensus/telemetry.ex @@ -106,7 +106,8 @@ defmodule LambdaEthereumConsensus.Telemetry do last_value("vm.system_counts.process_count"), last_value("vm.system_counts.atom_count"), last_value("vm.system_counts.port_count"), - last_value("vm.message_queue.length", tags: [:process]) + last_value("vm.message_queue.length", tags: [:process]), + last_value("vm.uptime.total", unit: :millisecond) ] end @@ -114,10 +115,16 @@ defmodule LambdaEthereumConsensus.Telemetry do [ # A module, function and arguments to be invoked periodically. # This function must call :telemetry.execute/3 and a metric must be added above. 
- {__MODULE__, :message_queue_lengths, []} + {__MODULE__, :message_queue_lengths, []}, + {__MODULE__, :uptime, []} ] end + def uptime do + {uptime, _} = :erlang.statistics(:wall_clock) + :telemetry.execute([:vm, :uptime], %{total: uptime}) + end + defp register_queue_length(name, len) do :telemetry.execute([:vm, :message_queue], %{length: len}, %{process: inspect(name)}) end diff --git a/lib/spec/runners/fork_choice.ex b/lib/spec/runners/fork_choice.ex index fec035714..4fbe01160 100644 --- a/lib/spec/runners/fork_choice.ex +++ b/lib/spec/runners/fork_choice.ex @@ -105,7 +105,7 @@ defmodule ForkChoiceTestRunner do signed_block = %SignedBeaconBlock{message: anchor_block, signature: <<0::768>>} - {:ok, store} = Helpers.get_forkchoice_store(anchor_state, signed_block, false) + {:ok, store} = Store.get_forkchoice_store(anchor_state, signed_block, false) assert {:ok, _store} = apply_steps(case_dir, store, steps) end diff --git a/lib/spec/runners/operations.ex b/lib/spec/runners/operations.ex index dcdba4bf5..c7f821c22 100644 --- a/lib/spec/runners/operations.ex +++ b/lib/spec/runners/operations.ex @@ -97,8 +97,10 @@ defmodule OperationsTestRunner do YamlElixir.read_from_file!(case_dir <> "/execution.yaml") |> SpecTestUtils.sanitize_yaml() + status = if execution_valid, do: :valid, else: :invalid + result = - Operations.process_execution_payload(pre, body, fn _payload -> {:ok, execution_valid} end) + Operations.process_execution_payload(pre, body, fn _payload -> {:ok, status} end) case post do nil -> diff --git a/lib/types/store.ex b/lib/types/store.ex index a3cc0162c..737d28c80 100644 --- a/lib/types/store.ex +++ b/lib/types/store.ex @@ -2,6 +2,17 @@ defmodule Types.Store do @moduledoc """ The Store struct is used to track information required for the fork choice algorithm. 
""" + + alias LambdaEthereumConsensus.ForkChoice.Simple.Tree + alias LambdaEthereumConsensus.StateTransition.Accessors + alias LambdaEthereumConsensus.StateTransition.Misc + alias LambdaEthereumConsensus.Store.Blocks + alias LambdaEthereumConsensus.Store.StateStore + alias Types.BeaconBlock + alias Types.BeaconState + alias Types.Checkpoint + alias Types.SignedBeaconBlock + defstruct [ :time, :genesis_time, @@ -15,31 +26,72 @@ defmodule Types.Store do :block_states, :checkpoint_states, :latest_messages, - :unrealized_justifications + :unrealized_justifications, + # Stores block data on the current fork tree (~last two epochs) + :tree_cache ] @type t :: %__MODULE__{ time: Types.uint64(), genesis_time: Types.uint64(), - justified_checkpoint: Types.Checkpoint.t() | nil, - finalized_checkpoint: Types.Checkpoint.t(), - unrealized_justified_checkpoint: Types.Checkpoint.t() | nil, - unrealized_finalized_checkpoint: Types.Checkpoint.t() | nil, + justified_checkpoint: Checkpoint.t() | nil, + finalized_checkpoint: Checkpoint.t(), + unrealized_justified_checkpoint: Checkpoint.t() | nil, + unrealized_finalized_checkpoint: Checkpoint.t() | nil, proposer_boost_root: Types.root() | nil, equivocating_indices: MapSet.t(Types.validator_index()), - blocks: %{Types.root() => Types.BeaconBlock.t()}, - block_states: %{Types.root() => Types.BeaconState.t()}, - checkpoint_states: %{Types.Checkpoint.t() => Types.BeaconState.t()}, - latest_messages: %{Types.validator_index() => Types.Checkpoint.t()}, - unrealized_justifications: %{Types.root() => Types.Checkpoint.t()} + blocks: %{Types.root() => BeaconBlock.t()}, + block_states: %{Types.root() => BeaconState.t()}, + checkpoint_states: %{Checkpoint.t() => BeaconState.t()}, + latest_messages: %{Types.validator_index() => Checkpoint.t()}, + unrealized_justifications: %{Types.root() => Checkpoint.t()}, + tree_cache: Tree.t() } - alias LambdaEthereumConsensus.StateTransition.Misc - alias LambdaEthereumConsensus.Store.Blocks - alias LambdaEthereumConsensus.Store.BlockStore - alias LambdaEthereumConsensus.Store.StateStore - alias Types.BeaconState - alias Types.SignedBeaconBlock + @spec get_forkchoice_store(BeaconState.t(), SignedBeaconBlock.t(), boolean()) :: + {:ok, t()} | {:error, String.t()} + def get_forkchoice_store( + %BeaconState{} = anchor_state, + %SignedBeaconBlock{message: anchor_block} = signed_block, + use_db + ) do + anchor_state_root = Ssz.hash_tree_root!(anchor_state) + anchor_block_root = Ssz.hash_tree_root!(anchor_block) + + if anchor_block.state_root == anchor_state_root do + anchor_epoch = Accessors.get_current_epoch(anchor_state) + + anchor_checkpoint = %Checkpoint{ + epoch: anchor_epoch, + root: anchor_block_root + } + + time = anchor_state.genesis_time + ChainSpec.get("SECONDS_PER_SLOT") * anchor_state.slot + + %__MODULE__{ + time: time, + genesis_time: anchor_state.genesis_time, + justified_checkpoint: anchor_checkpoint, + finalized_checkpoint: anchor_checkpoint, + unrealized_justified_checkpoint: anchor_checkpoint, + unrealized_finalized_checkpoint: anchor_checkpoint, + proposer_boost_root: <<0::256>>, + equivocating_indices: MapSet.new(), + blocks: %{}, + block_states: %{}, + checkpoint_states: %{anchor_checkpoint => anchor_state}, + latest_messages: %{}, + unrealized_justifications: %{anchor_block_root => anchor_checkpoint}, + tree_cache: Tree.new(anchor_block_root) + } + |> then(&if use_db, do: &1 |> Map.delete(:blocks) |> Map.delete(:block_states), else: &1) + |> store_block(anchor_block_root, signed_block) + |> store_state(anchor_block_root, 
anchor_state) + |> then(&{:ok, &1}) + else + {:error, "Anchor block state root does not match anchor state root"} + end + end def get_current_slot(%__MODULE__{time: time, genesis_time: genesis_time}) do # NOTE: this assumes GENESIS_SLOT == 0 @@ -98,12 +150,12 @@ defmodule Types.Store do store end - @spec get_block(t(), Types.root()) :: Types.BeaconBlock.t() | nil + @spec get_block(t(), Types.root()) :: BeaconBlock.t() | nil def get_block(%__MODULE__{blocks: blocks}, block_root) do Map.get(blocks, block_root) end - @spec get_block(t(), Types.root()) :: Types.BeaconBlock.t() | nil + @spec get_block(t(), Types.root()) :: BeaconBlock.t() | nil def get_block(%__MODULE__{}, block_root) do case Blocks.get_block(block_root) do nil -> nil @@ -111,7 +163,7 @@ defmodule Types.Store do end end - @spec get_block!(t(), Types.root()) :: Types.BeaconBlock.t() + @spec get_block!(t(), Types.root()) :: BeaconBlock.t() def get_block!(store, block_root) do case get_block(store, block_root) do nil -> raise "Block not found: 0x#{Base.encode16(block_root)}" @@ -119,20 +171,37 @@ defmodule Types.Store do end end - @spec get_blocks(t()) :: Enumerable.t(Types.BeaconBlock.t()) - def get_blocks(%__MODULE__{blocks: blocks}), do: blocks - def get_blocks(%__MODULE__{}), do: BlockStore.stream_blocks() + @spec has_block?(t(), Types.root()) :: boolean() + def has_block?(%__MODULE__{tree_cache: tree}, block_root) do + Tree.has_block?(tree, block_root) + end + + @spec get_children(t(), Types.root()) :: [BeaconBlock.t()] + def get_children(%__MODULE__{tree_cache: tree} = store, parent_root) do + Tree.get_children!(tree, parent_root) + |> Enum.map(&{&1, get_block!(store, &1)}) + end @spec store_block(t(), Types.root(), SignedBeaconBlock.t()) :: t() def store_block(%__MODULE__{blocks: blocks} = store, block_root, %{message: block}) do - blocks - |> Map.put(block_root, block) - |> then(&%{store | blocks: &1}) + new_store = update_tree(store, block_root, block.parent_root) + %{new_store | blocks: Map.put(blocks, block_root, block)} end @spec store_block(t(), Types.root(), SignedBeaconBlock.t()) :: t() def store_block(%__MODULE__{} = store, block_root, %SignedBeaconBlock{} = signed_block) do Blocks.store_block(block_root, signed_block) - store + update_tree(store, block_root, signed_block.message.parent_root) + end + + defp update_tree(%__MODULE__{} = store, block_root, parent_root) do + # We expect the finalized block to be in the tree + tree = Tree.update_root!(store.tree_cache, store.finalized_checkpoint.root) + + case Tree.add_block(tree, block_root, parent_root) do + {:ok, new_tree} -> %{store | tree_cache: new_tree} + # Block is older than current finalized block + {:error, :not_found} -> store + end end end diff --git a/metrics/docker-compose.yml b/metrics/docker-compose.yml index 7519f9e28..a0fd06bdf 100644 --- a/metrics/docker-compose.yml +++ b/metrics/docker-compose.yml @@ -1,4 +1,4 @@ -version: '0.1' +version: '3' name: 'lambda-ethereum-consensus-grafana' services: @@ -6,14 +6,14 @@ services: image: prom/prometheus container_name: prometheus hostname: prometheus - ports: - - "9090:9090" volumes: + # prometheus configuration - ./prometheus:/etc/prometheus + # prometheus data - prometheus-data:/prometheus command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml networks: - open: + grafana-prometheus: aliases: - prometheus extra_hosts: @@ -25,17 +25,79 @@ services: ports: - "3000:3000" volumes: + # grafana configuration - ./grafana/provisioning:/etc/grafana/provisioning + # grafana data - 
grafana-data:/var/lib/grafana + environment: + # WARNING: use this for same-machine access ONLY + GF_AUTH_ANONYMOUS_ENABLED: "true" + GF_AUTH_DISABLE_LOGIN_FORM: "true" + GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin" networks: - open: + grafana-prometheus: + aliases: + - grafana + grafana-loki: aliases: - grafana + # Since the Loki containers are running as user 10001 and the mounted data volume is owned by root, + # Loki would not have permissions to create the directories. + # Therefore the init container changes permissions of the mounted directory. + loki-init: + image: &lokiImage grafana/loki:2.9.2 + user: root + entrypoint: + - "chown" + - "10001:10001" + - "/tmp/loki" + volumes: + - loki-data:/tmp/loki + + loki: + image: *lokiImage + container_name: loki + volumes: + # loki configuration + - ./loki:/etc/loki + # loki data + - loki-data:/tmp/loki + command: --config.file=/etc/loki/loki.yml + networks: + grafana-loki: + aliases: + - loki + loki-promtail: + aliases: + - loki + + promtail: + image: grafana/promtail + container_name: promtail + volumes: + # promtail configuration + - ./promtail:/etc/promtail + # logs to scrape + - ../logs:/var/log/consensus + # promtail data + - promtail-data:/tmp/promtail + command: --config.file=/etc/promtail/promtail.yml + networks: + loki-promtail: + aliases: + - promtail + networks: - open: + grafana-prometheus: + driver: bridge + grafana-loki: + driver: bridge + loki-promtail: driver: bridge volumes: prometheus-data: grafana-data: + loki-data: + promtail-data: diff --git a/metrics/grafana/provisioning/dashboards/home.json b/metrics/grafana/provisioning/dashboards/home.json index 6dd20e3a5..0746ecd92 100644 --- a/metrics/grafana/provisioning/dashboards/home.json +++ b/metrics/grafana/provisioning/dashboards/home.json @@ -36,54 +36,33 @@ "fieldConfig": { "defaults": { "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" + "fixedColor": "green", + "mode": "fixed" + }, + "decimals": 0, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "color": "dark-red", + "index": 0, + "text": "Down" + } + }, + "type": "special" } - }, - "mappings": [], + ], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null - }, - { - "color": "red", - "value": 80 } ] - } + }, + "unit": "ms" }, "overrides": [] }, @@ -93,19 +72,23 @@ "x": 0, "y": 0 }, - "id": 6, + "hideTimeOverride": true, + "id": 24, "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "textMode": "value_and_name" }, + "pluginVersion": "10.2.0", "targets": [ { "datasource": { @@ -113,28 +96,52 @@ "uid": "PBFA97CFB590B2093" }, "editorMode": "code", - "expr": "sync_store_slot{}", - "instant": false, - 
"legendFormat": "max_slot", + "expr": "vm_uptime_total{}", + "legendFormat": "Uptime", "range": true, - "refId": "Max slot" - }, + "refId": "Uptime" + } + ], + "timeFrom": "1s", + "type": "stat" + }, + { + "datasource": { + "type": "loki", + "uid": "P8E80F9AEF21F6940" + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 23, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Descending", + "wrapLogMessage": false + }, + "targets": [ { "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "editorMode": "code", - "expr": "sync_on_block_slot{}", - "hide": false, - "instant": false, - "legendFormat": "processed_slot", - "range": true, - "refId": "Processed slot" + "type": "loki", + "uid": "P8E80F9AEF21F6940" + }, + "editorMode": "builder", + "expr": "{job=\"lambda_ethereum_consensus\"} |= ``", + "maxLines": 100, + "queryType": "range", + "refId": "Raw logs" } ], - "title": "Current sync progress", - "type": "timeseries" + "title": "Raw logs", + "type": "logs" }, { "datasource": { @@ -198,10 +205,10 @@ "gridPos": { "h": 6, "w": 12, - "x": 12, - "y": 0 + "x": 0, + "y": 6 }, - "id": 4, + "id": 6, "options": { "legend": { "calcs": [], @@ -221,35 +228,27 @@ "uid": "PBFA97CFB590B2093" }, "editorMode": "code", - "exemplar": true, - "expr": "rate(network_request_count{result=\"success\"}[$__rate_interval])", - "hide": false, - "interval": "", - "legendFormat": "success", + "expr": "sync_store_slot{}", + "instant": false, + "legendFormat": "max_slot", "range": true, - "refId": "Success" + "refId": "Max slot" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "disableTextWrap": false, "editorMode": "code", - "exemplar": false, - "expr": "sum(rate(network_request_count{result=~\"error|retry\"}[$__rate_interval]))", - "fullMetaSearch": false, + "expr": "sync_on_block_slot{}", "hide": false, - "includeNullMetadata": true, "instant": false, - "interval": "", - "legendFormat": "error", + "legendFormat": "processed_slot", "range": true, - "refId": "Error", - "useBackend": false + "refId": "Processed slot" } ], - "title": "P2P Requests", + "title": "Current sync progress", "type": "timeseries" }, { @@ -276,10 +275,11 @@ "gridPos": { "h": 6, "w": 12, - "x": 0, + "x": 12, "y": 6 }, "id": 22, + "maxDataPoints": 50, "options": { "calculate": false, "calculation": { @@ -343,6 +343,176 @@ "title": "Block processing time", "type": "heatmap" }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisWidth": 1, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": -3, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "vm_memory_total{instance=\"host.docker.internal:9568\"}", + "interval": "", + "legendFormat": "total", + "refId": "Total" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "vm_memory_atom{instance=\"host.docker.internal:9568\"}", + "hide": false, + "interval": "", + "legendFormat": "atom", + "refId": "Atom" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "vm_memory_binary{instance=\"host.docker.internal:9568\"}", + "hide": false, + "interval": "", + "legendFormat": "binary", + "refId": "Binary" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "vm_memory_code{instance=\"host.docker.internal:9568\"}", + "hide": false, + "interval": "", + "legendFormat": "code", + "refId": "Code" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "vm_memory_ets{instance=\"host.docker.internal:9568\"}", + "hide": false, + "interval": "", + "legendFormat": "ETS", + "refId": "ETS" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "vm_memory_processes{instance=\"host.docker.internal:9568\"}", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "processes", + "refId": "Processes" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "vm_memory_system{instance=\"host.docker.internal:9568\"}", + "hide": false, + "interval": "", + "legendFormat": "system", + "refId": "System" + } + ], + "title": "VM Memory", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -405,8 +575,8 @@ "gridPos": { "h": 6, "w": 12, - "x": 12, - "y": 6 + "x": 0, + "y": 18 }, "id": 3, "options": { @@ -472,7 +642,6 @@ "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", - "axisWidth": 1, "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, @@ -500,7 +669,6 @@ } }, "mappings": [], - "min": -3, "thresholds": { "mode": "absolute", "steps": [ @@ -513,18 +681,17 @@ "value": 80 } ] - }, - "unit": "decbytes" + } }, "overrides": [] }, "gridPos": { "h": 6, - "w": 24, - "x": 0, - "y": 12 + "w": 12, + "x": 12, + "y": 18 }, - "id": 2, + "id": 12, "options": { "legend": { "calcs": [], @@ -543,87 +710,60 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "exemplar": true, - "expr": "vm_memory_total{instance=\"host.docker.internal:9568\"}", - "interval": "", - "legendFormat": "total", - "refId": "Total" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "vm_memory_atom{instance=\"host.docker.internal:9568\"}", - "hide": false, - "interval": "", - "legendFormat": "atom", - "refId": "Atom" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "vm_memory_binary{instance=\"host.docker.internal:9568\"}", - "hide": false, - "interval": "", - "legendFormat": "binary", - "refId": "Binary" - }, - { - 
"datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "vm_memory_code{instance=\"host.docker.internal:9568\"}", - "hide": false, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (direction) (rate(port_message_count{direction=\"->elixir\"}[$__rate_interval]))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, "interval": "", - "legendFormat": "code", - "refId": "Code" + "legendFormat": "incoming", + "range": true, + "refId": "A", + "useBackend": false }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "exemplar": true, - "expr": "vm_memory_ets{instance=\"host.docker.internal:9568\"}", + "editorMode": "code", + "expr": "sum by (direction) (rate(port_message_count{direction=\"elixir->\"}[$__rate_interval]))", "hide": false, - "interval": "", - "legendFormat": "ETS", - "refId": "ETS" + "instant": false, + "legendFormat": "outgoing", + "range": true, + "refId": "B" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "exemplar": true, - "expr": "vm_memory_processes{instance=\"host.docker.internal:9568\"}", + "editorMode": "code", + "expr": "sum by () (rate(port_message_count{}[$__rate_interval]))", "hide": false, "instant": false, - "interval": "", - "legendFormat": "processes", - "refId": "Processes" + "legendFormat": "total", + "range": true, + "refId": "C" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "exemplar": true, - "expr": "vm_memory_system{instance=\"host.docker.internal:9568\"}", + "editorMode": "code", + "expr": "rate(port_message_count{}[$__rate_interval])", "hide": false, - "interval": "", - "legendFormat": "system", - "refId": "System" + "instant": false, + "legendFormat": "{{function}}", + "range": true, + "refId": "D" } ], - "title": "VM Memory", + "title": "Libp2pPort Messages", "type": "timeseries" }, { @@ -657,7 +797,7 @@ "h": 6, "w": 5, "x": 0, - "y": 18 + "y": 24 }, "id": 7, "options": { @@ -717,7 +857,7 @@ "h": 6, "w": 7, "x": 5, - "y": 18 + "y": 24 }, "id": 5, "options": { @@ -819,9 +959,9 @@ "h": 6, "w": 12, "x": 12, - "y": 18 + "y": 24 }, - "id": 12, + "id": 4, "options": { "legend": { "calcs": [], @@ -835,65 +975,41 @@ } }, "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "disableTextWrap": false, - "editorMode": "code", - "exemplar": false, - "expr": "sum by (direction) (rate(port_message_count{direction=\"->elixir\"}[$__rate_interval]))", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "interval": "", - "legendFormat": "incoming", - "range": true, - "refId": "A", - "useBackend": false - }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, "editorMode": "code", - "expr": "sum by (direction) (rate(port_message_count{direction=\"elixir->\"}[$__rate_interval]))", - "hide": false, - "instant": false, - "legendFormat": "outgoing", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "editorMode": "code", - "expr": "sum by () (rate(port_message_count{}[$__rate_interval]))", + "exemplar": true, + "expr": "rate(network_request_count{result=\"success\"}[$__rate_interval])", "hide": false, - "instant": false, - "legendFormat": "total", + "interval": "", + "legendFormat": "success", "range": true, - "refId": "C" + "refId": "Success" }, { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "disableTextWrap": 
false, "editorMode": "code", - "expr": "rate(port_message_count{}[$__rate_interval])", + "exemplar": false, + "expr": "sum(rate(network_request_count{result=~\"error|retry\"}[$__rate_interval]))", + "fullMetaSearch": false, "hide": false, + "includeNullMetadata": true, "instant": false, - "legendFormat": "{{function}}", + "interval": "", + "legendFormat": "error", "range": true, - "refId": "D" + "refId": "Error", + "useBackend": false } ], - "title": "Libp2pPort Messages", + "title": "P2P Requests", "type": "timeseries" }, { @@ -957,9 +1073,9 @@ }, "gridPos": { "h": 6, - "w": 24, + "w": 12, "x": 0, - "y": 24 + "y": 30 }, "id": 13, "options": { @@ -1019,11 +1135,12 @@ }, "gridPos": { "h": 6, - "w": 24, - "x": 0, + "w": 12, + "x": 12, "y": 30 }, "id": 14, + "maxDataPoints": 25, "options": { "calculate": false, "cellGap": 1, @@ -1942,6 +2059,6 @@ "timezone": "", "title": "Node", "uid": "90EXFQnIk", - "version": 4, + "version": 1, "weekStart": "" } diff --git a/metrics/grafana/provisioning/datasources/loki_ds.yml b/metrics/grafana/provisioning/datasources/loki_ds.yml new file mode 100644 index 000000000..6fcfd57c7 --- /dev/null +++ b/metrics/grafana/provisioning/datasources/loki_ds.yml @@ -0,0 +1,6 @@ +datasources: + - name: Loki + access: proxy + type: loki + url: http://loki:3100 + isDefault: false diff --git a/metrics/loki/loki.yml b/metrics/loki/loki.yml new file mode 100644 index 000000000..0abb31135 --- /dev/null +++ b/metrics/loki/loki.yml @@ -0,0 +1,49 @@ +auth_enabled: false + +server: + http_listen_port: 3100 + grpc_listen_port: 9096 + +common: + instance_addr: 127.0.0.1 + path_prefix: /tmp/loki + storage: + filesystem: + chunks_directory: /tmp/loki/chunks + rules_directory: /tmp/loki/rules + replication_factor: 1 + ring: + kvstore: + store: inmemory + +query_range: + results_cache: + cache: + embedded_cache: + enabled: true + max_size_mb: 100 + +schema_config: + configs: + - from: 2020-10-24 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h + +ruler: + alertmanager_url: http://localhost:9093 +# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration +# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/ +# +# Statistics help us better understand how Loki is used, and they show us performance +# levels for most users. This helps us prioritize features and documentation. +# For more information on what's sent, look at +# https://github.com/grafana/loki/blob/main/pkg/usagestats/stats.go +# Refer to the buildReport method to see what goes into a report. 
+# +# If you would like to disable reporting, uncomment the following lines: +#analytics: +# reporting_enabled: false diff --git a/metrics/promtail/promtail.yml b/metrics/promtail/promtail.yml new file mode 100644 index 000000000..99633e926 --- /dev/null +++ b/metrics/promtail/promtail.yml @@ -0,0 +1,35 @@ +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/promtail/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: system + static_configs: + - targets: + - localhost + labels: + job: lambda_ethereum_consensus + __path__: /var/log/consensus/*log + pipeline_stages: + - logfmt: + mapping: + ts: + level: + msg: + mfa: + process: registered_name + - timestamp: + format: "2006-01-02T15:04:05.000" + source: ts + - labels: + level: + mfa: + process: + - output: + source: msg diff --git a/mix.exs b/mix.exs index 65060d9ad..3cbe53c6f 100644 --- a/mix.exs +++ b/mix.exs @@ -39,6 +39,7 @@ defmodule LambdaEthereumConsensus.MixProject do {:snappyer, "~> 1.2"}, {:yaml_elixir, "~> 2.8"}, {:timex, "~> 3.7"}, + {:recase, "~> 0.5"}, {:rexbug, "~> 1.0"}, {:eep, git: "https://github.com/virtan/eep", branch: "master"}, {:protobuf, "~> 0.12.0"}, @@ -47,14 +48,16 @@ defmodule LambdaEthereumConsensus.MixProject do {:telemetry_poller, "~> 1.0"}, {:telemetry_metrics, "~> 0.6"}, {:telemetry_metrics_prometheus, "~> 1.1.0"}, + {:aja, "~> 0.6"}, + {:logfmt_ex, "~> 0.4.2"}, {:ex2ms, "~> 1.6", runtime: false}, {:eflambe, "~> 0.3.1"}, {:patch, "~> 0.12.0", only: [:test]}, {:stream_data, "~> 0.5", only: [:test]}, {:benchee, "~> 1.2", only: [:dev]}, - {:aja, "~> 0.6"}, {:dialyxir, "~> 1.1", only: [:dev, :test], runtime: false}, - {:credo, "~> 1.7", only: [:dev, :test], runtime: false} + {:credo, "~> 1.7", only: [:dev, :test], runtime: false}, + {:open_api_spex, "~> 3.18"} ] end diff --git a/mix.lock b/mix.lock index 8ba40ff29..03bc33118 100644 --- a/mix.lock +++ b/mix.lock @@ -27,11 +27,13 @@ "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"}, "joken": {:hex, :joken, "2.6.0", "b9dd9b6d52e3e6fcb6c65e151ad38bf4bc286382b5b6f97079c47ade6b1bcc6a", [:mix], [{:jose, "~> 1.11.5", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm", "5a95b05a71cd0b54abd35378aeb1d487a23a52c324fa7efdffc512b655b5aaa7"}, "jose": {:hex, :jose, "1.11.6", "613fda82552128aa6fb804682e3a616f4bc15565a048dabd05b1ebd5827ed965", [:mix, :rebar3], [], "hexpm", "6275cb75504f9c1e60eeacb771adfeee4905a9e182103aa59b53fed651ff9738"}, + "logfmt_ex": {:hex, :logfmt_ex, "0.4.2", "e337b6072bd21ad61d8bbe38d9c591b5a8e4869ceba4967699d027baedf2eec8", [:mix], [], "hexpm", "7fad3704383d4595adf0da873e72c8b393120e67b1257f9102da881fde9d4249"}, "meck": {:hex, :meck, "0.9.2", "85ccbab053f1db86c7ca240e9fc718170ee5bda03810a6292b5306bf31bae5f5", [:rebar3], [], "hexpm", "81344f561357dc40a8344afa53767c32669153355b626ea9fcbc8da6b3045826"}, "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, "mime": {:hex, :mime, "2.0.5", "dc34c8efd439abe6ae0343edbb8556f4d63f178594894720607772a041b04b02", [:mix], [], "hexpm", "da0d64a365c45bc9935cc5c8a7fc5e49a0e0f9932a761c55d6c52b142780a05c"}, "mimerl": {:hex, :mimerl, "1.2.0", 
"67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"}, "nimble_options": {:hex, :nimble_options, "1.1.0", "3b31a57ede9cb1502071fade751ab0c7b8dbe75a9a4c2b5bbb0943a690b63172", [:mix], [], "hexpm", "8bbbb3941af3ca9acc7835f5655ea062111c9c27bcac53e004460dfd19008a99"}, + "open_api_spex": {:hex, :open_api_spex, "3.18.1", "0a73cd5dbcba7d32952dd9738c6819892933d9bae1642f04c9f200281524dd31", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "f52933cddecca675e42ead660379ae2d3853f57f5a35d201eaed85e2e81517d1"}, "parse_trans": {:hex, :parse_trans, "3.4.1", "6e6aa8167cb44cc8f39441d05193be6e6f4e7c2946cb2759f015f8c56b76e5ff", [:rebar3], [], "hexpm", "620a406ce75dada827b82e453c19cf06776be266f5a67cff34e1ef2cbb60e49a"}, "patch": {:hex, :patch, "0.12.0", "2da8967d382bade20344a3e89d618bfba563b12d4ac93955468e830777f816b0", [:mix], [], "hexpm", "ffd0e9a7f2ad5054f37af84067ee88b1ad337308a1cb227e181e3967127b0235"}, "phoenix": {:hex, :phoenix, "1.7.10", "02189140a61b2ce85bb633a9b6fd02dff705a5f1596869547aeb2b2b95edd729", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "cf784932e010fd736d656d7fead6a584a4498efefe5b8227e9f383bf15bb79d0"}, @@ -42,6 +44,7 @@ "plug_crypto": {:hex, :plug_crypto, "2.0.0", "77515cc10af06645abbfb5e6ad7a3e9714f805ae118fa1a70205f80d2d70fe73", [:mix], [], "hexpm", "53695bae57cc4e54566d993eb01074e4d894b65a3766f1c43e2c61a1b0f45ea9"}, "protobuf": {:hex, :protobuf, "0.12.0", "58c0dfea5f929b96b5aa54ec02b7130688f09d2de5ddc521d696eec2a015b223", [:mix], [{:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "75fa6cbf262062073dd51be44dd0ab940500e18386a6c4e87d5819a58964dc45"}, "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"}, + "recase": {:hex, :recase, "0.7.0", "3f2f719f0886c7a3b7fe469058ec539cb7bbe0023604ae3bce920e186305e5ae", [:mix], [], "hexpm", "36f5756a9f552f4a94b54a695870e32f4e72d5fad9c25e61bc4a3151c08a4e0c"}, "redbug": {:hex, :redbug, "1.2.2", "366d8961770ddc7bb5d209fbadddfa7271005487f938c087a0e385a57abfee33", [:rebar3], [], "hexpm", "b5fe7b94e487be559cb0ec1c0e938c9761205d3e91a96bf263bdf1beaebea729"}, "rexbug": {:hex, :rexbug, "1.0.6", "024071c67d970151fbdc06f299faf8db3e1b2ac759a28623a9cc80a517fc74f2", [:mix], [{:mix_test_watch, ">= 0.5.0", [hex: :mix_test_watch, repo: "hexpm", optional: true]}, 
{:redbug, "~> 1.2", [hex: :redbug, repo: "hexpm", optional: false]}], "hexpm", "148ea724979413e9fd84ca3b4bb5d2d8b840ac481adfd645f5846fda409a642c"}, "rustler": {:hex, :rustler, "0.29.1", "880f20ae3027bd7945def6cea767f5257bc926f33ff50c0d5d5a5315883c084d", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:toml, "~> 0.6", [hex: :toml, repo: "hexpm", optional: false]}], "hexpm", "109497d701861bfcd26eb8f5801fe327a8eef304f56a5b63ef61151ff44ac9b6"}, diff --git a/test/integration/fork_choice/handlers_test.exs b/test/integration/fork_choice/handlers_test.exs index 7457b1647..0b33d8ef9 100644 --- a/test/integration/fork_choice/handlers_test.exs +++ b/test/integration/fork_choice/handlers_test.exs @@ -21,7 +21,7 @@ defmodule Integration.ForkChoice.HandlersTest do {:ok, signed_block} = BlockStore.get_block_by_slot(state.slot) {:ok, new_signed_block} = BlockStore.get_block_by_slot(state.slot + 1) - assert {:ok, store} = Helpers.get_forkchoice_store(state, signed_block.message, false) + assert {:ok, store} = Store.get_forkchoice_store(state, signed_block.message, false) new_store = Handlers.on_tick(store, :os.system_time(:second)) assert {:ok, _} = Handlers.on_block(new_store, new_signed_block) diff --git a/test/unit/simple_tree_test.exs b/test/unit/simple_tree_test.exs new file mode 100644 index 000000000..dee9bb348 --- /dev/null +++ b/test/unit/simple_tree_test.exs @@ -0,0 +1,50 @@ +defmodule Unit.SimpleTreeTest do + use ExUnit.Case + + alias LambdaEthereumConsensus.ForkChoice.Simple.Tree + + test "Create a tree" do + Tree.new("root") + end + + test "Add new blocks to the tree" do + tree = + Tree.new("root") + |> Tree.add_block!("root_child1", "root") + |> Tree.add_block!("root_child2", "root") + |> Tree.add_block!("root_child1_child", "root_child1") + + # We use MapSet to ignore the order of the blocks + expected = MapSet.new(["root_child1", "root_child2"]) + root_children = Tree.get_children!(tree, "root") |> MapSet.new() + + assert MapSet.equal?(root_children, expected) + + assert Tree.get_children!(tree, "root_child1") == ["root_child1_child"] + assert Tree.get_children!(tree, "root_child1_child") == [] + assert Tree.get_children!(tree, "root_child2") == [] + end + + test "Update the tree's root" do + tree = + Tree.new("root") + |> Tree.add_block!("root_child1", "root") + |> Tree.add_block!("root_child2", "root") + |> Tree.add_block!("root_child1_child", "root_child1") + # Update tree's root and prune pre-root blocks + |> Tree.update_root!("root_child1") + + expected_tree = + Tree.new("root_child1") + |> Tree.add_block!("root_child1_child", "root_child1") + + assert tree == expected_tree + + error = {:error, :not_found} + assert Tree.get_children(tree, "root") == error, "root should be pruned" + assert Tree.get_children(tree, "root_child2") == error, "cousins should be pruned" + + assert Tree.get_children!(tree, "root_child1") == ["root_child1_child"] + assert Tree.get_children!(tree, "root_child1_child") == [] + end +end